Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c	1116
1 file changed, 681 insertions(+), 435 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index abe94a55ecad..5f20cadee343 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -154,21 +154,23 @@ int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors senso
  *
  */
-static ssize_t amdgpu_get_dpm_state(struct device *dev,
-				    struct device_attribute *attr,
-				    char *buf)
+static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 	enum amd_pm_state_type pm;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev)) {
 		if (adev->smu.ppt_funcs->get_current_power_state)
@@ -189,18 +191,18 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
 			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
 }
 
-static ssize_t amdgpu_set_dpm_state(struct device *dev,
-				    struct device_attribute *attr,
-				    const char *buf,
-				    size_t count)
+static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf,
+					  size_t count)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 	enum amd_pm_state_type state;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return -EINVAL;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	if (strncmp("battery", buf, strlen("battery")) == 0)
 		state = POWER_STATE_TYPE_BATTERY;
@@ -212,8 +214,10 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
 		return -EINVAL;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev)) {
 		mutex_lock(&adev->pm.mutex);
@@ -294,21 +298,23 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
  *
  */
-static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
-						       struct device_attribute *attr,
-						       char *buf)
+static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
+							    struct device_attribute *attr,
+							    char *buf)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 	enum amd_dpm_forced_level level = 0xff;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
 		level = smu_get_performance_level(&adev->smu);
@@ -332,10 +338,10 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
 		"unknown");
 }
 
-static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
-						       struct device_attribute *attr,
-						       const char *buf,
-						       size_t count)
+static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
+							    struct device_attribute *attr,
+							    const char *buf,
+							    size_t count)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
@@ -343,8 +349,8 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
 	enum amd_dpm_forced_level current_level = 0xff;
 	int ret = 0;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return -EINVAL;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	if (strncmp("low", buf, strlen("low")) == 0) {
 		level = AMD_DPM_FORCED_LEVEL_LOW;
@@ -369,8 +375,10 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
 	}
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
 		current_level = smu_get_performance_level(&adev->smu);
@@ -383,6 +391,15 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
 		return count;
 	}
 
+	if (adev->asic_type == CHIP_RAVEN) {
+		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
+			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && level == AMD_DPM_FORCED_LEVEL_MANUAL)
+				amdgpu_gfx_off_ctrl(adev, false);
+			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && level != AMD_DPM_FORCED_LEVEL_MANUAL)
+				amdgpu_gfx_off_ctrl(adev, true);
+		}
+	}
+
 	/* profile_exit setting is valid only when current mode is in profile mode */
 	if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
 	    AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
@@ -436,16 +453,24 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
 	struct pp_states_info data;
 	int i, buf_len, ret;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev)) {
 		ret = smu_get_power_num_states(&adev->smu, &data);
 		if (ret)
 			return ret;
-	} else if (adev->powerplay.pp_funcs->get_pp_num_states)
+	} else if (adev->powerplay.pp_funcs->get_pp_num_states) {
 		amdgpu_dpm_get_pp_num_states(adev, &data);
+	} else {
+		memset(&data, 0, sizeof(data));
+	}
 
 	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
@@ -472,12 +497,14 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
 	enum amd_pm_state_type pm = 0;
 	int i = 0, ret = 0;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev)) {
 		pm = smu_get_current_power_state(smu);
@@ -511,8 +538,8 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	if (adev->pp_force_state_enabled)
 		return amdgpu_get_pp_cur_state(dev, attr, buf);
@@ -531,8 +558,8 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
 	unsigned long idx;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return -EINVAL;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	if (strlen(buf) == 1)
 		adev->pp_force_state_enabled = false;
@@ -552,8 +579,10 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
 		state = data.states[idx];
 
 		ret = pm_runtime_get_sync(ddev->dev);
-		if (ret < 0)
+		if (ret < 0) {
+			pm_runtime_put_autosuspend(ddev->dev);
 			return ret;
+		}
 
 		/* only set user selected power states */
 		if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
@@ -589,12 +618,14 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
 	char *table = NULL;
 	int size, ret;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev)) {
 		size = smu_sys_get_pp_table(&adev->smu, (void **)&table);
@@ -631,12 +662,14 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
 	struct amdgpu_device *adev = ddev->dev_private;
 	int ret = 0;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return -EINVAL;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev)) {
 		ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count);
@@ -681,7 +714,7 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
  * default power levels, write "r" (reset) to the file to reset them.
  *
  *
- * < For Vega20 >
+ * < For Vega20 and newer ASICs >
  *
  * Reading the file will display:
  *
@@ -736,8 +769,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
 	const char delimiter[3] = {' ', '\n', '\0'};
 	uint32_t type;
 
-	if (amdgpu_sriov_vf(adev))
-		return -EINVAL;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	if (count > 127)
 		return -EINVAL;
@@ -763,8 +796,7 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
 		tmp_str++;
 	while (isspace(*++tmp_str));
 
-	while (tmp_str[0]) {
-		sub_str = strsep(&tmp_str, delimiter);
+	while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
 		ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
 		if (ret)
 			return -EINVAL;
@@ -775,8 +807,10 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
 	}
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev)) {
 		ret = smu_od_edit_dpm_table(&adev->smu, type,
@@ -828,12 +862,14 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
 	ssize_t size;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev)) {
 		size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
@@ -870,18 +906,18 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
  * the corresponding bit from original ppfeature masks and input the
 * new ppfeature masks.
 */
-static ssize_t amdgpu_set_pp_feature_status(struct device *dev,
-					    struct device_attribute *attr,
-					    const char *buf,
-					    size_t count)
+static ssize_t amdgpu_set_pp_features(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf,
+				      size_t count)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 	uint64_t featuremask;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev))
-		return -EINVAL;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = kstrtou64(buf, 0, &featuremask);
 	if (ret)
@@ -890,8 +926,10 @@ static ssize_t amdgpu_set_pp_feature_status(struct device *dev,
 	pr_debug("featuremask = 0x%llx\n", featuremask);
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev)) {
 		ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask);
@@ -914,21 +952,23 @@ static ssize_t amdgpu_set_pp_feature_status(struct device *dev,
 	return count;
 }
 
-static ssize_t amdgpu_get_pp_feature_status(struct device *dev,
-					    struct device_attribute *attr,
-					    char *buf)
+static ssize_t amdgpu_get_pp_features(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 	ssize_t size;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
 		size = smu_sys_get_pp_feature_mask(&adev->smu, buf);
@@ -982,12 +1022,14 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
 	ssize_t size;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
 		size = smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
@@ -1024,8 +1066,7 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
 	memcpy(buf_cpy, buf, bytes);
 	buf_cpy[bytes] = '\0';
 	tmp = buf_cpy;
-	while (tmp[0]) {
-		sub_str = strsep(&tmp, delimiter);
+	while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
 		if (strlen(sub_str)) {
 			ret = kstrtol(sub_str, 0, &level);
 			if (ret)
@@ -1048,19 +1089,21 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
 	int ret;
 	uint32_t mask = 0;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return -EINVAL;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = amdgpu_read_mask(buf, count, &mask);
 	if (ret)
 		return ret;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask, true);
+		ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
 
@@ -1082,12 +1125,14 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
 	ssize_t size;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
 		size = smu_print_clk_levels(&adev->smu, SMU_MCLK, buf);
@@ -1112,19 +1157,21 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
 	uint32_t mask = 0;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return -EINVAL;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = amdgpu_read_mask(buf, count, &mask);
 	if (ret)
 		return ret;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask, true);
+		ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
 
@@ -1146,12 +1193,14 @@ static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
 	ssize_t size;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
 		size = smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf);
@@ -1176,19 +1225,21 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
 	int ret;
 	uint32_t mask = 0;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return -EINVAL;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = amdgpu_read_mask(buf, count, &mask);
 	if (ret)
 		return ret;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask, true);
+		ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
 	else
@@ -1212,12 +1263,14 @@ static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
 	ssize_t size;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
 		size = smu_print_clk_levels(&adev->smu, SMU_FCLK, buf);
@@ -1242,19 +1295,21 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
 	int ret;
 	uint32_t mask = 0;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return -EINVAL;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = amdgpu_read_mask(buf, count, &mask);
 	if (ret)
 		return ret;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask, true);
+		ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
 	else
@@ -1278,12 +1333,14 @@ static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
 	ssize_t size;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
 		size = smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf);
@@ -1308,19 +1365,21 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
 	int ret;
 	uint32_t mask = 0;
 
-	if (amdgpu_sriov_vf(adev))
-		return -EINVAL;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = amdgpu_read_mask(buf, count, &mask);
 	if (ret)
 		return ret;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask, true);
+		ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
 	else
@@ -1344,12 +1403,14 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
 	ssize_t size;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
 		size = smu_print_clk_levels(&adev->smu, SMU_PCIE, buf);
@@ -1374,19 +1435,21 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
 	int ret;
 	uint32_t mask = 0;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return -EINVAL;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = amdgpu_read_mask(buf, count, &mask);
 	if (ret)
 		return ret;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask, true);
+		ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
 	else
@@ -1410,12 +1473,14 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
 	uint32_t value = 0;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
 		value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK);
@@ -1438,8 +1503,8 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
 	int ret;
 	long int value;
 
-	if (amdgpu_sriov_vf(adev))
-		return -EINVAL;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = kstrtol(buf, 0, &value);
 
@@ -1447,8 +1512,10 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
 		return -EINVAL;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev)) {
 		value = smu_set_od_percentage(&(adev->smu), SMU_OD_SCLK, (uint32_t)value);
@@ -1479,12 +1546,14 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
 	uint32_t value = 0;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
 		value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK);
@@ -1507,8 +1576,8 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
 	int ret;
 	long int value;
 
-	if (amdgpu_sriov_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = kstrtol(buf, 0, &value);
 
@@ -1516,8 +1585,10 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
 		return -EINVAL;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev)) {
 		value = smu_set_od_percentage(&(adev->smu), SMU_OD_MCLK, (uint32_t)value);
@@ -1568,12 +1639,14 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
 	ssize_t size;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
 		size = smu_get_power_profile_mode(&adev->smu, buf);
@@ -1594,7 +1667,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
 		const char *buf,
 		size_t count)
 {
-	int ret = 0xff;
+	int ret;
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 	uint32_t parameter_size = 0;
@@ -1606,15 +1679,15 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
 	long int profile_mode = 0;
 	const char delimiter[3] = {' ', '\n', '\0'};
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	tmp[0] = *(buf);
 	tmp[1] = '\0';
 	ret = kstrtol(tmp, 0, &profile_mode);
 	if (ret)
 		return -EINVAL;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return -EINVAL;
-
 	if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
 		if (count < 2 || count > 127)
 			return -EINVAL;
@@ -1622,8 +1695,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
 			i++;
 		memcpy(buf_cpy, buf, count-i);
 		tmp_str = buf_cpy;
-		while (tmp_str[0]) {
-			sub_str = strsep(&tmp_str, delimiter);
+		while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
 			ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
 			if (ret)
 				return -EINVAL;
@@ -1635,8 +1707,10 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
 	parameter[parameter_size] = profile_mode;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
 		ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true);
@@ -1653,27 +1727,29 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
 }
 
 /**
- * DOC: busy_percent
+ * DOC: gpu_busy_percent
  *
  * The amdgpu driver provides a sysfs API for reading how busy the GPU
  * is as a percentage. The file gpu_busy_percent is used for this.
  * The SMU firmware computes a percentage of load based on the
  * aggregate activity level in the IP cores.
  */
-static ssize_t amdgpu_get_busy_percent(struct device *dev,
-				       struct device_attribute *attr,
-				       char *buf)
+static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 	int r, value, size = sizeof(value);
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	r = pm_runtime_get_sync(ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return r;
+	}
 
 	/* read the IP busy sensor */
 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
@@ -1696,20 +1772,22 @@ static ssize_t amdgpu_get_busy_percent(struct device *dev,
  * The SMU firmware computes a percentage of load based on the
  * aggregate activity level in the IP cores.
  */
-static ssize_t amdgpu_get_memory_busy_percent(struct device *dev,
-					      struct device_attribute *attr,
-					      char *buf)
+static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 	int r, value, size = sizeof(value);
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	r = pm_runtime_get_sync(ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return r;
+	}
 
 	/* read the IP busy sensor */
 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
@@ -1742,15 +1820,23 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
-	uint64_t count0, count1;
+	uint64_t count0 = 0, count1 = 0;
 	int ret;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
+	if (adev->flags & AMD_IS_APU)
+		return -ENODATA;
+
+	if (!adev->asic_funcs->get_pcie_usage)
+		return -ENODATA;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
@@ -1778,8 +1864,8 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
+	if (adev->in_gpu_reset)
+		return -EPERM;
 
 	if (adev->unique_id)
 		return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);
@@ -1787,57 +1873,255 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
 	return 0;
 }
 
-static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
-static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
-		   amdgpu_get_dpm_forced_performance_level,
-		   amdgpu_set_dpm_forced_performance_level);
-static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL);
-static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL);
-static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR,
-		   amdgpu_get_pp_force_state,
-		   amdgpu_set_pp_force_state);
-static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR,
-		   amdgpu_get_pp_table,
-		   amdgpu_set_pp_table);
-static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR,
-		   amdgpu_get_pp_dpm_sclk,
-		   amdgpu_set_pp_dpm_sclk);
-static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
-		   amdgpu_get_pp_dpm_mclk,
-		   amdgpu_set_pp_dpm_mclk);
-static DEVICE_ATTR(pp_dpm_socclk, S_IRUGO | S_IWUSR,
-		   amdgpu_get_pp_dpm_socclk,
-		   amdgpu_set_pp_dpm_socclk);
-static DEVICE_ATTR(pp_dpm_fclk, S_IRUGO | S_IWUSR,
-		   amdgpu_get_pp_dpm_fclk,
-		   amdgpu_set_pp_dpm_fclk);
-static DEVICE_ATTR(pp_dpm_dcefclk, S_IRUGO | S_IWUSR,
-		   amdgpu_get_pp_dpm_dcefclk,
-		   amdgpu_set_pp_dpm_dcefclk);
-static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
-		   amdgpu_get_pp_dpm_pcie,
-		   amdgpu_set_pp_dpm_pcie);
-static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
-		   amdgpu_get_pp_sclk_od,
-		   amdgpu_set_pp_sclk_od);
-static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
-		   amdgpu_get_pp_mclk_od,
-		   amdgpu_set_pp_mclk_od);
-static DEVICE_ATTR(pp_power_profile_mode, S_IRUGO | S_IWUSR,
-		   amdgpu_get_pp_power_profile_mode,
-		   amdgpu_set_pp_power_profile_mode);
-static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR,
-		   amdgpu_get_pp_od_clk_voltage,
-		   amdgpu_set_pp_od_clk_voltage);
-static DEVICE_ATTR(gpu_busy_percent, S_IRUGO,
-		   amdgpu_get_busy_percent, NULL);
-static DEVICE_ATTR(mem_busy_percent, S_IRUGO,
-		   amdgpu_get_memory_busy_percent, NULL);
-static DEVICE_ATTR(pcie_bw, S_IRUGO, amdgpu_get_pcie_bw, NULL);
-static DEVICE_ATTR(pp_features, S_IRUGO | S_IWUSR,
-		   amdgpu_get_pp_feature_status,
-		   amdgpu_set_pp_feature_status);
-static DEVICE_ATTR(unique_id, S_IRUGO, amdgpu_get_unique_id, NULL);
+/**
+ * DOC: thermal_throttling_logging
+ *
+ * Thermal throttling pulls down the clock frequency and thus the performance.
+ * It's a useful mechanism to protect the chip from overheating. Since it
+ * impacts performance, the user controls whether it is enabled and if so,
+ * the log frequency.
+ *
+ * Reading back the file shows you the status (enabled or disabled) and
+ * the interval (in seconds) between each thermal logging.
+ *
+ * Writing an integer to the file sets a new logging interval, in seconds.
+ * The value should be between 1 and 3600. If the value is less than 1,
+ * thermal logging is disabled. Values greater than 3600 are ignored.
+ */
+static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
+						     struct device_attribute *attr,
+						     char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = ddev->dev_private;
+
+	return snprintf(buf, PAGE_SIZE, "%s: thermal throttling logging %s, with interval %d seconds\n",
+			adev->ddev->unique,
+			atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
+			adev->throttling_logging_rs.interval / HZ + 1);
+}
+
+static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
+						     struct device_attribute *attr,
+						     const char *buf,
+						     size_t count)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = ddev->dev_private;
+	long throttling_logging_interval;
+	unsigned long flags;
+	int ret = 0;
+
+	ret = kstrtol(buf, 0, &throttling_logging_interval);
+	if (ret)
+		return ret;
+
+	if (throttling_logging_interval > 3600)
+		return -EINVAL;
+
+	if (throttling_logging_interval > 0) {
+		raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
+		/*
+		 * Reset the ratelimit timer internals.
+		 * This can effectively restart the timer.
+		 */
+		adev->throttling_logging_rs.interval =
+			(throttling_logging_interval - 1) * HZ;
+		adev->throttling_logging_rs.begin = 0;
+		adev->throttling_logging_rs.printed = 0;
+		adev->throttling_logging_rs.missed = 0;
+		raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);
+
+		atomic_set(&adev->throttling_logging_enabled, 1);
+	} else {
+		atomic_set(&adev->throttling_logging_enabled, 0);
+	}
+
+	return count;
+}
+
+static struct amdgpu_device_attr amdgpu_device_attrs[] = {
+	AMDGPU_DEVICE_ATTR_RW(power_dpm_state,			ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+	AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RO(pp_num_states,			ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RO(pp_cur_state,			ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RW(pp_force_state,			ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RW(pp_table,				ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk,			ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk,			ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk,			ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk,			ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk,			ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie,			ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RW(pp_sclk_od,			ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RW(pp_mclk_od,			ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode,		ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage,		ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent,			ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RO(mem_busy_percent,			ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RO(pcie_bw,				ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RW(pp_features,			ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RO(unique_id,			ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging,	ATTR_FLAG_BASIC),
+};
+
+static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+			       uint32_t mask, enum amdgpu_device_attr_states *states)
+{
+	struct device_attribute *dev_attr = &attr->dev_attr;
+	const char *attr_name = dev_attr->attr.name;
+	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+	enum amd_asic_type asic_type = adev->asic_type;
+
+	if (!(attr->flags & mask)) {
+		*states = ATTR_STATE_UNSUPPORTED;
+		return 0;
+	}
+
+#define DEVICE_ATTR_IS(_name)	(!strcmp(attr_name, #_name))
+
+	if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
+		if (asic_type < CHIP_VEGA10)
+			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
+		if (asic_type < CHIP_VEGA10 || asic_type == CHIP_ARCTURUS)
+			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
+		if (asic_type < CHIP_VEGA20)
+			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
+		if (asic_type == CHIP_ARCTURUS)
+			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
+		*states = ATTR_STATE_UNSUPPORTED;
+		if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
+		    (!is_support_sw_smu(adev) && hwmgr->od_enabled))
+			*states = ATTR_STATE_SUPPORTED;
+	} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
+		if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
+			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(pcie_bw)) {
+		/* PCIe Perf counters won't work on APU nodes */
+		if (adev->flags & AMD_IS_APU)
+			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(unique_id)) {
+		if (asic_type != CHIP_VEGA10 &&
+		    asic_type != CHIP_VEGA20 &&
+		    asic_type != CHIP_ARCTURUS)
+			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(pp_features)) {
+		if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10)
+			*states = ATTR_STATE_UNSUPPORTED;
+	}
+
+	if (asic_type == CHIP_ARCTURUS) {
+		/* Arcturus does not support standalone mclk/socclk/fclk level setting */
+		if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
+		    DEVICE_ATTR_IS(pp_dpm_socclk) ||
+		    DEVICE_ATTR_IS(pp_dpm_fclk)) {
+			dev_attr->attr.mode &= ~S_IWUGO;
+			dev_attr->store = NULL;
+		}
+	}
+
+#undef DEVICE_ATTR_IS
+
+	return 0;
+}
+
+
+static int amdgpu_device_attr_create(struct amdgpu_device *adev,
+				     struct amdgpu_device_attr *attr,
+				     uint32_t mask, struct list_head *attr_list)
+{
+	int ret = 0;
+	struct device_attribute *dev_attr = &attr->dev_attr;
+	const char *name = dev_attr->attr.name;
+	enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
+	struct amdgpu_device_attr_entry *attr_entry;
+
+	int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+			   uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
+
+	BUG_ON(!attr);
+
+	attr_update = attr->attr_update ? attr_update : default_attr_update;
+
+	ret = attr_update(adev, attr, mask, &attr_states);
+	if (ret) {
+		dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
+			name, ret);
+		return ret;
+	}
+
+	if (attr_states == ATTR_STATE_UNSUPPORTED)
+		return 0;
+
+	ret = device_create_file(adev->dev, dev_attr);
+	if (ret) {
+		dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
+			name, ret);
+	}
+
+	attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
+	if (!attr_entry)
+		return -ENOMEM;
+
+	attr_entry->attr = attr;
+	INIT_LIST_HEAD(&attr_entry->entry);
+
+	list_add_tail(&attr_entry->entry, attr_list);
+
+	return ret;
+}
+
+static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
+{
+	struct device_attribute *dev_attr = &attr->dev_attr;
+
+	device_remove_file(adev->dev, dev_attr);
+}
+
+static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
+					     struct list_head *attr_list);
+
+static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
+					    struct amdgpu_device_attr *attrs,
+					    uint32_t counts,
+					    uint32_t mask,
+					    struct list_head *attr_list)
+{
+	int ret = 0;
+	uint32_t i = 0;
+
+	for (i = 0; i < counts; i++) {
+		ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
+		if (ret)
+			goto failed;
+	}
+
+	return 0;
+
+failed:
+	amdgpu_device_attr_remove_groups(adev, attr_list);
+
+	return ret;
+}
+
+static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
+					     struct list_head *attr_list)
+{
+	struct amdgpu_device_attr_entry *entry, *entry_tmp;
+
+	if (list_empty(attr_list))
+		return ;
+
+	list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
+		amdgpu_device_attr_remove(adev, entry->attr);
+		list_del(&entry->entry);
+		kfree(entry);
+	}
+}
 
 static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
 				      struct device_attribute *attr,
@@ -1847,12 +2131,17 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
 	int channel = to_sensor_dev_attr(attr)->index;
 	int r, temp = 0, size = sizeof(temp);
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	if (channel >= PP_TEMP_MAX)
 		return -EINVAL;
 
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	switch (channel) {
 	case PP_TEMP_JUNCTION:
@@ -1978,9 +2267,14 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
 	u32 pwm_mode = 0;
 	int ret;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = pm_runtime_get_sync(adev->ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev)) {
 		pwm_mode = smu_get_fan_control_mode(&adev->smu);
@@ -2009,13 +2303,18 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
 	int err, ret;
 	int value;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	err = kstrtoint(buf, 10, &value);
 	if (err)
 		return err;
 
 	ret = pm_runtime_get_sync(adev->ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev)) {
 		smu_set_fan_control_mode(&adev->smu, value);
@@ -2058,9 +2357,14 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
 	u32 value;
 	u32 pwm_mode;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	err = pm_runtime_get_sync(adev->ddev->dev);
-	if (err < 0)
+	if (err < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return err;
+	}
 
 	if (is_support_sw_smu(adev))
 		pwm_mode = smu_get_fan_control_mode(&adev->smu);
@@ -2107,9 +2411,14 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
 	int err;
 	u32 speed = 0;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	err = pm_runtime_get_sync(adev->ddev->dev);
-	if (err < 0)
+	if (err < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return err;
+	}
 
 	if (is_support_sw_smu(adev))
 		err = smu_get_fan_speed_percent(&adev->smu, &speed);
@@ -2137,9 +2446,14 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
 	int err;
 	u32 speed = 0;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	err = pm_runtime_get_sync(adev->ddev->dev);
-	if (err < 0)
+	if (err < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return err;
+	}
 
 	if (is_support_sw_smu(adev))
 		err = smu_get_fan_speed_rpm(&adev->smu, &speed);
@@ -2166,9 +2480,14 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
 	u32 size = sizeof(min_rpm);
 	int r;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
 				   (void *)&min_rpm, &size);
@@ -2191,9 +2510,14 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
 	u32 size = sizeof(max_rpm);
 	int r;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
 				   (void *)&max_rpm, &size);
@@ -2215,9 +2539,14 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
 	int err;
 	u32 rpm = 0;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	err = pm_runtime_get_sync(adev->ddev->dev);
-	if (err < 0)
+	if (err < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return err;
+	}
 
 	if (is_support_sw_smu(adev))
		err = smu_get_fan_speed_rpm(&adev->smu, &rpm);
@@ -2244,9 +2573,14 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
 	u32 value;
 	u32 pwm_mode;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	err = pm_runtime_get_sync(adev->ddev->dev);
-	if (err < 0)
+	if (err < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return err;
+	}
 
 	if (is_support_sw_smu(adev))
 		pwm_mode = smu_get_fan_control_mode(&adev->smu);
@@ -2290,9 +2624,14 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
 	u32 pwm_mode = 0;
 	int ret;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	ret = pm_runtime_get_sync(adev->ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev)) {
 		pwm_mode = smu_get_fan_control_mode(&adev->smu);
@@ -2322,6 +2661,9 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
 	int value;
 	u32 pwm_mode;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	err = kstrtoint(buf, 10, &value);
 	if (err)
 		return err;
@@ -2334,8 +2676,10 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
 		return -EINVAL;
 
 	err = pm_runtime_get_sync(adev->ddev->dev);
-	if (err < 0)
+	if (err < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return err;
+	}
 
 	if (is_support_sw_smu(adev)) {
 		smu_set_fan_control_mode(&adev->smu, pwm_mode);
@@ -2362,9 +2706,14 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
 	u32 vddgfx;
 	int r, size = sizeof(vddgfx);
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	/* get the voltage */
 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
@@ -2394,13 +2743,18 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
 	u32 vddnb;
 	int r, size = sizeof(vddnb);
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	/* only APUs have vddnb */
 	if (!(adev->flags & AMD_IS_APU))
 		return -EINVAL;
 
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	/* get the voltage */
 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
@@ -2431,9 +2785,14 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
 	int r, size = sizeof(u32);
 	unsigned uw;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	/* get the voltage */
 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
@@ -2467,12 +2826,17 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
 	ssize_t size;
 	int r;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	if (is_support_sw_smu(adev)) {
-		smu_get_power_limit(&adev->smu, &limit, true, true);
+		smu_get_power_limit(&adev->smu, &limit, true);
 		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
 	} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
 		adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
@@ -2496,12 +2860,17 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
 	ssize_t size;
 	int r;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	if (is_support_sw_smu(adev)) {
-		smu_get_power_limit(&adev->smu, &limit, false, true);
+		smu_get_power_limit(&adev->smu, &limit, false);
 		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
 	} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
 		adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
@@ -2526,6 +2895,9 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
 	int err;
 	u32 value;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	if (amdgpu_sriov_vf(adev))
 		return -EINVAL;
 
@@ -2537,8 +2909,10 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
 
 	err = pm_runtime_get_sync(adev->ddev->dev);
-	if (err < 0)
+	if (err < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return err;
+	}
 
 	if (is_support_sw_smu(adev))
 		err = smu_set_power_limit(&adev->smu, value);
@@ -2564,9 +2938,14 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
 	uint32_t sclk;
 	int r, size = sizeof(sclk);
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	/* get the sclk */
 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
@@ -2578,7 +2957,7 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
 	if (r)
 		return r;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", sclk * 10 * 1000);
+	return snprintf(buf, PAGE_SIZE, "%u\n", sclk * 10 * 1000);
 }
 
 static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
@@ -2596,9 +2975,14 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
 	uint32_t mclk;
 	int r, size = sizeof(mclk);
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	/* get the sclk */
 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
@@ -2610,7 +2994,7 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
 	if (r)
 		return r;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", mclk * 10 * 1000);
+	return snprintf(buf, PAGE_SIZE, "%u\n", mclk * 10 * 1000);
 }
 
 static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
@@ -3171,21 +3555,34 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
 {
 	int ret = 0;
 
-	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
-	if (ret)
-		DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
-			  enable ? "enable" : "disable", ret);
-
-	/* enable/disable Low Memory PState for UVD (4k videos) */
-	if (adev->asic_type == CHIP_STONEY &&
-	    adev->uvd.decode_image_width >= WIDTH_4K) {
-		struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+	if (adev->family == AMDGPU_FAMILY_SI) {
+		mutex_lock(&adev->pm.mutex);
+		if (enable) {
+			adev->pm.dpm.uvd_active = true;
+			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
+		} else {
+			adev->pm.dpm.uvd_active = false;
+		}
+		mutex_unlock(&adev->pm.mutex);
 
-		if (hwmgr && hwmgr->hwmgr_func &&
-		    hwmgr->hwmgr_func->update_nbdpm_pstate)
-			hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
-							       !enable,
-							       true);
+		amdgpu_pm_compute_clocks(adev);
+	} else {
+		ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
+		if (ret)
+			DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
+				  enable ? "enable" : "disable", ret);
+
+		/* enable/disable Low Memory PState for UVD (4k videos) */
+		if (adev->asic_type == CHIP_STONEY &&
+		    adev->uvd.decode_image_width >= WIDTH_4K) {
+			struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+
+			if (hwmgr && hwmgr->hwmgr_func &&
+			    hwmgr->hwmgr_func->update_nbdpm_pstate)
+				hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
+								       !enable,
+								       true);
+		}
 	}
 }
 
@@ -3193,10 +3590,24 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
 {
 	int ret = 0;
 
-	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
-	if (ret)
-		DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
-			  enable ? "enable" : "disable", ret);
+	if (adev->family == AMDGPU_FAMILY_SI) {
+		mutex_lock(&adev->pm.mutex);
+		if (enable) {
+			adev->pm.dpm.vce_active = true;
+			/* XXX select vce level based on ring/task */
+			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
+		} else {
+			adev->pm.dpm.vce_active = false;
+		}
+		mutex_unlock(&adev->pm.mutex);
+
+		amdgpu_pm_compute_clocks(adev);
+	} else {
+		ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
+		if (ret)
+			DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
+				  enable ? "enable" : "disable", ret);
+	}
 }
 
 void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
@@ -3238,8 +3649,8 @@ int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_versio
 
 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 {
-	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
 	int ret;
+	uint32_t mask = 0;
 
 	if (adev->pm.sysfs_initialized)
 		return 0;
@@ -3247,6 +3658,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 	if (adev->pm.dpm_enabled == 0)
 		return 0;
 
+	INIT_LIST_HEAD(&adev->pm.pm_attr_list);
+
 	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
 								   DRIVER_NAME, adev,
 								   hwmon_groups);
@@ -3257,160 +3670,26 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 		return ret;
 	}
 
-	ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
-	if (ret) {
-		DRM_ERROR("failed to create device file for dpm state\n");
-		return ret;
-	}
-	ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
-	if (ret) {
-		DRM_ERROR("failed to create device file for dpm state\n");
-		return ret;
-	}
-
-
-	ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
-	if (ret) {
-		DRM_ERROR("failed to create device file pp_num_states\n");
-		return ret;
-	}
-	ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
-	if (ret) {
-		DRM_ERROR("failed to create device file pp_cur_state\n");
-		return ret;
-	}
-	ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
-	if (ret) {
-		DRM_ERROR("failed to create device file pp_force_state\n");
-		return ret;
-	}
-	ret = device_create_file(adev->dev, &dev_attr_pp_table);
-	if (ret) {
-		DRM_ERROR("failed to create device file pp_table\n");
-		return ret;
-	}
-
-	ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
-	if (ret) {
-		DRM_ERROR("failed to create device file pp_dpm_sclk\n");
-		return ret;
-	}
-
-	/* Arcturus does not support standalone mclk/socclk/fclk level setting */
-	if (adev->asic_type == CHIP_ARCTURUS) {
-		dev_attr_pp_dpm_mclk.attr.mode &= ~S_IWUGO;
-		dev_attr_pp_dpm_mclk.store = NULL;
-
-		dev_attr_pp_dpm_socclk.attr.mode &= ~S_IWUGO;
-		dev_attr_pp_dpm_socclk.store = NULL;
-
-		dev_attr_pp_dpm_fclk.attr.mode &= ~S_IWUGO;
-		dev_attr_pp_dpm_fclk.store = NULL;
+	switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
+	case SRIOV_VF_MODE_ONE_VF:
+		mask = ATTR_FLAG_ONEVF;
+		break;
+	case SRIOV_VF_MODE_MULTI_VF:
+		mask = 0;
+		break;
+	case SRIOV_VF_MODE_BARE_METAL:
+	default:
+		mask = ATTR_FLAG_MASK_ALL;
+		break;
 	}
 
-	ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
-	if (ret) {
-		DRM_ERROR("failed to create device file pp_dpm_mclk\n");
-		return ret;
-	}
-	if (adev->asic_type >= CHIP_VEGA10) {
-		ret = device_create_file(adev->dev, &dev_attr_pp_dpm_socclk);
-		if (ret) {
-			DRM_ERROR("failed to create device file pp_dpm_socclk\n");
-			return ret;
-		}
-		if (adev->asic_type != CHIP_ARCTURUS) {
-			ret = device_create_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
-			if (ret) {
-				DRM_ERROR("failed to create device file pp_dpm_dcefclk\n");
-				return ret;
-			}
-		}
-	}
-	if (adev->asic_type >= CHIP_VEGA20) {
-		ret = device_create_file(adev->dev, &dev_attr_pp_dpm_fclk);
-		if (ret) {
-			DRM_ERROR("failed to create device file pp_dpm_fclk\n");
-			return ret;
-		}
-	}
-	if (adev->asic_type != CHIP_ARCTURUS) {
-		ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
-		if (ret) {
-			DRM_ERROR("failed to create device file pp_dpm_pcie\n");
-			return ret;
-		}
-	}
-	ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od);
-	if (ret) {
-		DRM_ERROR("failed to create device file pp_sclk_od\n");
-		return ret;
-	}
-	ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od);
-	if (ret) {
-		DRM_ERROR("failed to create device file pp_mclk_od\n");
-		return ret;
-	}
-	ret = device_create_file(adev->dev,
-			&dev_attr_pp_power_profile_mode);
-	if (ret) {
-		DRM_ERROR("failed to create device file "
-				"pp_power_profile_mode\n");
-		return ret;
-	}
-	if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
-	    (!is_support_sw_smu(adev) && hwmgr->od_enabled)) {
-		ret = device_create_file(adev->dev,
-				&dev_attr_pp_od_clk_voltage);
-		if (ret) {
-			DRM_ERROR("failed to create device file "
-					"pp_od_clk_voltage\n");
-			return ret;
-		}
-	}
-	ret = device_create_file(adev->dev,
-			&dev_attr_gpu_busy_percent);
-	if (ret) {
-		DRM_ERROR("failed to create device file "
-				"gpu_busy_level\n");
-		return ret;
-	}
-	/* APU does not have its own dedicated memory */
-	if (!(adev->flags & AMD_IS_APU) &&
-	    (adev->asic_type != CHIP_VEGA10)) {
-		ret = device_create_file(adev->dev,
-				&dev_attr_mem_busy_percent);
-		if (ret) {
-			DRM_ERROR("failed to create device file "
-					"mem_busy_percent\n");
-			return ret;
-		}
-	}
-	/* PCIe Perf counters won't work on APU nodes */
-	if (!(adev->flags & AMD_IS_APU)) {
-		ret = device_create_file(adev->dev, &dev_attr_pcie_bw);
-		if (ret) {
-			DRM_ERROR("failed to create device file pcie_bw\n");
-			return ret;
-		}
-	}
-	if (adev->unique_id)
-		ret = device_create_file(adev->dev, &dev_attr_unique_id);
-	if (ret) {
-		DRM_ERROR("failed to create device file unique_id\n");
+	ret = amdgpu_device_attr_create_groups(adev,
+					       amdgpu_device_attrs,
+					       ARRAY_SIZE(amdgpu_device_attrs),
+					       mask,
+					       &adev->pm.pm_attr_list);
+	if (ret)
 		return ret;
-	}
-
-	if ((adev->asic_type >= CHIP_VEGA10) &&
-	    !(adev->flags & AMD_IS_APU)) {
-		ret = device_create_file(adev->dev,
-				&dev_attr_pp_features);
-		if (ret) {
-			DRM_ERROR("failed to create device file "
-					"pp_features\n");
-			return ret;
-		}
-	}
 
 	adev->pm.sysfs_initialized = true;
 
@@ -3419,51 +3698,13 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 
 void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
 {
-	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
-
 	if (adev->pm.dpm_enabled == 0)
 		return;
 
 	if (adev->pm.int_hwmon_dev)
 		hwmon_device_unregister(adev->pm.int_hwmon_dev);
-	device_remove_file(adev->dev, &dev_attr_power_dpm_state);
-	device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
-
-	device_remove_file(adev->dev, &dev_attr_pp_num_states);
-	device_remove_file(adev->dev, &dev_attr_pp_cur_state);
-	device_remove_file(adev->dev, &dev_attr_pp_force_state);
-	device_remove_file(adev->dev, &dev_attr_pp_table);
-
-	device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
-	device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
-	if (adev->asic_type >= CHIP_VEGA10) {
-		device_remove_file(adev->dev, &dev_attr_pp_dpm_socclk);
-		if (adev->asic_type != CHIP_ARCTURUS)
-			device_remove_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
-	}
-	if (adev->asic_type != CHIP_ARCTURUS)
-		device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
-	if (adev->asic_type >= CHIP_VEGA20)
-		device_remove_file(adev->dev, &dev_attr_pp_dpm_fclk);
-	device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
-	device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
-	device_remove_file(adev->dev,
-			   &dev_attr_pp_power_profile_mode);
-	if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
-	    (!is_support_sw_smu(adev) && hwmgr->od_enabled))
-		device_remove_file(adev->dev,
-				   &dev_attr_pp_od_clk_voltage);
-	device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
-	if (!(adev->flags & AMD_IS_APU) &&
-	    (adev->asic_type != CHIP_VEGA10))
-		device_remove_file(adev->dev, &dev_attr_mem_busy_percent);
-	if (!(adev->flags & AMD_IS_APU))
-		device_remove_file(adev->dev, &dev_attr_pcie_bw);
-	if (adev->unique_id)
-		device_remove_file(adev->dev, &dev_attr_unique_id);
-	if ((adev->asic_type >= CHIP_VEGA10) &&
-	    !(adev->flags & AMD_IS_APU))
-		device_remove_file(adev->dev, &dev_attr_pp_features);
+
+	amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
 }
 
 void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
@@ -3626,9 +3867,14 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
 	u32 flags = 0;
 	int r;
 
+	if (adev->in_gpu_reset)
+		return -EPERM;
+
 	r = pm_runtime_get_sync(dev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(dev->dev);
 		return r;
+	}
 
 	amdgpu_device_ip_get_clockgating_state(adev, &flags);
 	seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
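
The sysfs files touched by this patch live under the GPU's device directory, typically /sys/class/drm/card0/device/ on a single-GPU system. As a rough illustration only (the card0 index, the write permission on thermal_throttling_logging, and the minimal error handling are assumptions of this sketch, not part of the patch), a small userspace C program that exercises two of the attributes documented above:

/* pm_sysfs_example.c - toy reader/writer for two amdgpu pm sysfs files.
 * Assumes the amdgpu device is card0 and that the caller is allowed to
 * write thermal_throttling_logging (normally root).
 * Build: cc -o pm_sysfs_example pm_sysfs_example.c
 */
#include <stdio.h>

#define SYSFS_DIR "/sys/class/drm/card0/device/"

int main(void)
{
	char line[128];
	FILE *f;

	/* gpu_busy_percent: SMU-computed GPU load, 0-100 (read-only). */
	f = fopen(SYSFS_DIR "gpu_busy_percent", "r");
	if (!f) {
		perror("gpu_busy_percent");
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		printf("GPU busy: %s", line);
	fclose(f);

	/* thermal_throttling_logging: per the DOC comment in the patch,
	 * 1-3600 sets the logging interval in seconds, 0 disables it. */
	f = fopen(SYSFS_DIR "thermal_throttling_logging", "w");
	if (!f) {
		perror("thermal_throttling_logging");
		return 1;
	}
	fputs("60\n", f);
	fclose(f);

	return 0;
}

Note that after this patch, all of these interfaces fail with -EPERM (seen as EPERM in userspace) while adev->in_gpu_reset is set, and a failing pm_runtime_get_sync() now drops its usage count via pm_runtime_put_autosuspend() instead of leaking it, so callers should simply retry or report the errno.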