Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c')
 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 366 ++++++++++++++++++++----------
 1 file changed, 228 insertions(+), 138 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 194d0c75b072..f205f56e3358 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -67,6 +67,9 @@ static const struct cg_flag_name clocks[] = {
 	{AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"},
 	{AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
 	{AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
+
+	{AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
+	{AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
 	{0, NULL},
 };
 
@@ -156,12 +159,16 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
 	struct amdgpu_device *adev = ddev->dev_private;
 	enum amd_pm_state_type pm;
 
-	if (is_support_sw_smu(adev) && adev->smu.ppt_funcs->get_current_power_state)
-		pm = amdgpu_smu_get_current_power_state(adev);
-	else if (adev->powerplay.pp_funcs->get_current_power_state)
+	if (is_support_sw_smu(adev)) {
+		if (adev->smu.ppt_funcs->get_current_power_state)
+			pm = smu_get_current_power_state(&adev->smu);
+		else
+			pm = adev->pm.dpm.user_state;
+	} else if (adev->powerplay.pp_funcs->get_current_power_state) {
 		pm = amdgpu_dpm_get_current_power_state(adev);
-	else
+	} else {
 		pm = adev->pm.dpm.user_state;
+	}
 
 	return snprintf(buf, PAGE_SIZE, "%s\n",
 			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
@@ -188,7 +195,11 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
 		goto fail;
 	}
 
-	if (adev->powerplay.pp_funcs->dispatch_tasks) {
+	if (is_support_sw_smu(adev)) {
+		mutex_lock(&adev->pm.mutex);
+		adev->pm.dpm.user_state = state;
+		mutex_unlock(&adev->pm.mutex);
+	} else if (adev->powerplay.pp_funcs->dispatch_tasks) {
 		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
 	} else {
 		mutex_lock(&adev->pm.mutex);
@@ -272,8 +283,11 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
 	struct amdgpu_device *adev = ddev->dev_private;
 	enum amd_dpm_forced_level level = 0xff;
 
-	if ((adev->flags & AMD_IS_PX) &&
-	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+	if (amdgpu_sriov_vf(adev))
+		return 0;
+
+	if ((adev->flags & AMD_IS_PX) &&
+	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
 		return snprintf(buf, PAGE_SIZE, "off\n");
 
 	if (is_support_sw_smu(adev))
@@ -311,11 +325,6 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
 	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
 		return -EINVAL;
 
-	if (is_support_sw_smu(adev))
-		current_level = smu_get_performance_level(&adev->smu);
-	else if (adev->powerplay.pp_funcs->get_performance_level)
-		current_level = amdgpu_dpm_get_performance_level(adev);
-
 	if (strncmp("low", buf, strlen("low")) == 0) {
 		level = AMD_DPM_FORCED_LEVEL_LOW;
 	} else if (strncmp("high", buf, strlen("high")) == 0) {
@@ -339,17 +348,23 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
 		goto fail;
 	}
 
-	if (amdgpu_sriov_vf(adev)) {
-		if (amdgim_is_hwperf(adev) &&
-		    adev->virt.ops->force_dpm_level) {
-			mutex_lock(&adev->pm.mutex);
-			adev->virt.ops->force_dpm_level(adev, level);
-			mutex_unlock(&adev->pm.mutex);
-			return count;
-		} else {
-			return -EINVAL;
+	/* handle sriov case here */
+	if (amdgpu_sriov_vf(adev)) {
+		if (amdgim_is_hwperf(adev) &&
+		    adev->virt.ops->force_dpm_level) {
+			mutex_lock(&adev->pm.mutex);
+			adev->virt.ops->force_dpm_level(adev, level);
+			mutex_unlock(&adev->pm.mutex);
+			return count;
+		} else {
+			return -EINVAL;
 		}
-	}
+	}
+
+	if (is_support_sw_smu(adev))
+		current_level = smu_get_performance_level(&adev->smu);
+	else if (adev->powerplay.pp_funcs->get_performance_level)
+		current_level = amdgpu_dpm_get_performance_level(adev);
 
 	if (current_level == level)
 		return count;
@@ -365,18 +380,9 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
 	}
 
 	if (is_support_sw_smu(adev)) {
-		mutex_lock(&adev->pm.mutex);
-		if (adev->pm.dpm.thermal_active) {
-			count = -EINVAL;
-			mutex_unlock(&adev->pm.mutex);
-			goto fail;
-		}
 		ret = smu_force_performance_level(&adev->smu, level);
 		if (ret)
 			count = -EINVAL;
-		else
-			adev->pm.dpm.forced_level = level;
-		mutex_unlock(&adev->pm.mutex);
 	} else if (adev->powerplay.pp_funcs->force_performance_level) {
 		mutex_lock(&adev->pm.mutex);
 		if (adev->pm.dpm.thermal_active) {
@@ -690,12 +696,12 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
 		if (ret)
 			return -EINVAL;
 	} else {
-		if (adev->powerplay.pp_funcs->odn_edit_dpm_table)
+		if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
 			ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
 						parameter, parameter_size);
-
-		if (ret)
-			return -EINVAL;
+			if (ret)
+				return -EINVAL;
+		}
 
 		if (type == PP_OD_COMMIT_DPM_TABLE) {
 			if (adev->powerplay.pp_funcs->dispatch_tasks) {
@@ -721,10 +727,10 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
 	uint32_t size = 0;
 
 	if (is_support_sw_smu(adev)) {
-		size = smu_print_clk_levels(&adev->smu, OD_SCLK, buf);
-		size += smu_print_clk_levels(&adev->smu, OD_MCLK, buf+size);
-		size += smu_print_clk_levels(&adev->smu, OD_VDDC_CURVE, buf+size);
-		size += smu_print_clk_levels(&adev->smu, OD_RANGE, buf+size);
+		size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
+		size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size);
+		size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size);
+		size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size);
 		return size;
 	} else if (adev->powerplay.pp_funcs->print_clock_levels) {
 		size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
@@ -739,10 +745,10 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
 }
 
 /**
- * DOC: ppfeatures
+ * DOC: pp_features
  *
  * The amdgpu driver provides a sysfs API for adjusting what powerplay
- * features to be enabled. The file ppfeatures is used for this. And
+ * features to be enabled. The file pp_features is used for this. And
  * this is only available for Vega10 and later dGPUs.
  *
  * Reading back the file will show you the followings:
@@ -754,7 +760,7 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
  * the corresponding bit from original ppfeature masks and input the
  * new ppfeature masks.
  */
-static ssize_t amdgpu_set_ppfeature_status(struct device *dev,
+static ssize_t amdgpu_set_pp_feature_status(struct device *dev,
 		struct device_attribute *attr,
 		const char *buf,
 		size_t count)
@@ -771,7 +777,7 @@ static ssize_t amdgpu_set_ppfeature_status(struct device *dev,
 	pr_debug("featuremask = 0x%llx\n", featuremask);
 
 	if (is_support_sw_smu(adev)) {
-		ret = smu_set_ppfeature_status(&adev->smu, featuremask);
+		ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask);
 		if (ret)
 			return -EINVAL;
 	} else if (adev->powerplay.pp_funcs->set_ppfeature_status) {
@@ -783,7 +789,7 @@ static ssize_t amdgpu_set_ppfeature_status(struct device *dev,
 	return count;
 }
 
-static ssize_t amdgpu_get_ppfeature_status(struct device *dev,
+static ssize_t amdgpu_get_pp_feature_status(struct device *dev,
 		struct device_attribute *attr,
 		char *buf)
 {
@@ -791,7 +797,7 @@ static ssize_t amdgpu_get_ppfeature_status(struct device *dev,
 	struct amdgpu_device *adev = ddev->dev_private;
 
 	if (is_support_sw_smu(adev)) {
-		return smu_get_ppfeature_status(&adev->smu, buf);
+		return smu_sys_get_pp_feature_mask(&adev->smu, buf);
 	} else if (adev->powerplay.pp_funcs->get_ppfeature_status)
 		return amdgpu_dpm_get_ppfeature_status(adev, buf);
 
@@ -799,8 +805,7 @@ static ssize_t amdgpu_get_ppfeature_status(struct device *dev,
 }
 
 /**
- * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk
- * pp_dpm_pcie
+ * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
  *
  * The amdgpu driver provides a sysfs API for adjusting what power levels
 * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk,
@@ -816,9 +821,15 @@ static ssize_t amdgpu_get_ppfeature_status(struct device *dev,
 *
 * To manually adjust these states, first select manual using
 * power_dpm_force_performance_level.
- * Secondly,Enter a new value for each level by inputing a string that
+ * Secondly, enter a new value for each level by inputing a string that
 * contains " echo xx xx xx > pp_dpm_sclk/mclk/pcie"
- * E.g., echo 4 5 6 to > pp_dpm_sclk will enable sclk levels 4, 5, and 6.
+ * E.g.,
+ *
+ * .. code-block:: bash
+ *
+ *	echo "4 5 6" > pp_dpm_sclk
+ *
+ * will enable sclk levels 4, 5, and 6.
 *
 * NOTE: change to the dcefclk max dpm level is not supported now
 */
@@ -835,7 +846,7 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
 		return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
 
 	if (is_support_sw_smu(adev))
-		return smu_print_clk_levels(&adev->smu, PP_SCLK, buf);
+		return smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
 	else if (adev->powerplay.pp_funcs->print_clock_levels)
 		return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
 	else
@@ -888,12 +899,15 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
 	int ret;
 	uint32_t mask = 0;
 
+	if (amdgpu_sriov_vf(adev))
+		return 0;
+
 	ret = amdgpu_read_mask(buf, count, &mask);
 	if (ret)
 		return ret;
 
 	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, PP_SCLK, mask);
+		ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask, true);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
 
@@ -910,8 +924,12 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 
+	if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
+	    adev->virt.ops->get_pp_clk)
+		return adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
+
 	if (is_support_sw_smu(adev))
-		return smu_print_clk_levels(&adev->smu, PP_MCLK, buf);
+		return smu_print_clk_levels(&adev->smu, SMU_MCLK, buf);
 	else if (adev->powerplay.pp_funcs->print_clock_levels)
 		return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
 	else
@@ -928,12 +946,15 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
 	int ret;
 	uint32_t mask = 0;
 
+	if (amdgpu_sriov_vf(adev))
+		return 0;
+
 	ret = amdgpu_read_mask(buf, count, &mask);
 	if (ret)
 		return ret;
 
 	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, PP_MCLK, mask);
+		ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask, true);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
 
@@ -951,7 +972,7 @@ static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
 	struct amdgpu_device *adev = ddev->dev_private;
 
 	if (is_support_sw_smu(adev))
-		return smu_print_clk_levels(&adev->smu, PP_SOCCLK, buf);
+		return smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf);
 	else if (adev->powerplay.pp_funcs->print_clock_levels)
 		return amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
 	else
@@ -973,7 +994,7 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
 		return ret;
 
 	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, PP_SOCCLK, mask);
+		ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask, true);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
 
@@ -991,7 +1012,7 @@ static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
 	struct amdgpu_device *adev = ddev->dev_private;
 
 	if (is_support_sw_smu(adev))
-		return smu_print_clk_levels(&adev->smu, PP_FCLK, buf);
+		return smu_print_clk_levels(&adev->smu, SMU_FCLK, buf);
 	else if (adev->powerplay.pp_funcs->print_clock_levels)
 		return amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
 	else
@@ -1013,7 +1034,7 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
 		return ret;
 
 	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, PP_FCLK, mask);
+		ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask, true);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
 
@@ -1031,7 +1052,7 @@ static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
 	struct amdgpu_device *adev = ddev->dev_private;
 
 	if (is_support_sw_smu(adev))
-		return smu_print_clk_levels(&adev->smu, PP_DCEFCLK, buf);
+		return smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf);
 	else if (adev->powerplay.pp_funcs->print_clock_levels)
 		return amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
 	else
@@ -1053,7 +1074,7 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
 		return ret;
 
 	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, PP_DCEFCLK, mask);
+		ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask, true);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
 
@@ -1071,7 +1092,7 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
 	struct amdgpu_device *adev = ddev->dev_private;
 
 	if (is_support_sw_smu(adev))
-		return smu_print_clk_levels(&adev->smu, PP_PCIE, buf);
+		return smu_print_clk_levels(&adev->smu, SMU_PCIE, buf);
 	else if (adev->powerplay.pp_funcs->print_clock_levels)
 		return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
 	else
@@ -1093,7 +1114,7 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
 		return ret;
 
 	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, PP_PCIE, mask);
+		ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask, true);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
 
@@ -1112,7 +1133,7 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
 	uint32_t value = 0;
 
 	if (is_support_sw_smu(adev))
-		value = smu_get_od_percentage(&(adev->smu), OD_SCLK);
+		value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK);
 	else if (adev->powerplay.pp_funcs->get_sclk_od)
 		value = amdgpu_dpm_get_sclk_od(adev);
 
@@ -1137,7 +1158,7 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
 	}
 
 	if (is_support_sw_smu(adev)) {
-		value = smu_set_od_percentage(&(adev->smu), OD_SCLK, (uint32_t)value);
+		value = smu_set_od_percentage(&(adev->smu), SMU_OD_SCLK, (uint32_t)value);
 	} else {
 		if (adev->powerplay.pp_funcs->set_sclk_od)
 			amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
@@ -1163,7 +1184,7 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
 	uint32_t value = 0;
 
 	if (is_support_sw_smu(adev))
-		value = smu_get_od_percentage(&(adev->smu), OD_MCLK);
+		value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK);
 	else if (adev->powerplay.pp_funcs->get_mclk_od)
 		value = amdgpu_dpm_get_mclk_od(adev);
 
@@ -1188,7 +1209,7 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
 	}
 
 	if (is_support_sw_smu(adev)) {
-		value = smu_set_od_percentage(&(adev->smu), OD_MCLK, (uint32_t)value);
+		value = smu_set_od_percentage(&(adev->smu), SMU_OD_MCLK, (uint32_t)value);
 	} else {
 		if (adev->powerplay.pp_funcs->set_mclk_od)
 			amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
@@ -1285,7 +1306,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
 	}
 	parameter[parameter_size] = profile_mode;
 	if (is_support_sw_smu(adev))
-		ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size);
+		ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true);
 	else if (adev->powerplay.pp_funcs->set_power_profile_mode)
 		ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
 	if (!ret)
@@ -1441,9 +1462,9 @@ static DEVICE_ATTR(gpu_busy_percent, S_IRUGO,
 static DEVICE_ATTR(mem_busy_percent, S_IRUGO,
 		amdgpu_get_memory_busy_percent, NULL);
 static DEVICE_ATTR(pcie_bw, S_IRUGO, amdgpu_get_pcie_bw, NULL);
-static DEVICE_ATTR(ppfeatures, S_IRUGO | S_IWUSR,
-		amdgpu_get_ppfeature_status,
-		amdgpu_set_ppfeature_status);
+static DEVICE_ATTR(pp_features, S_IRUGO | S_IWUSR,
+		amdgpu_get_pp_feature_status,
+		amdgpu_set_pp_feature_status);
 static DEVICE_ATTR(unique_id, S_IRUGO, amdgpu_get_unique_id, NULL);
 
 static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
@@ -1453,7 +1474,7 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
 	struct drm_device *ddev = adev->ddev;
 	int channel = to_sensor_dev_attr(attr)->index;
-	int r, temp, size = sizeof(temp);
+	int r, temp = 0, size = sizeof(temp);
 
 	/* Can't get temperature when the card is off */
 	if ((adev->flags & AMD_IS_PX) &&
@@ -1608,20 +1629,16 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
 	    (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
 		return -EINVAL;
 
-	if (is_support_sw_smu(adev)) {
-		err = kstrtoint(buf, 10, &value);
-		if (err)
-			return err;
+	err = kstrtoint(buf, 10, &value);
+	if (err)
+		return err;
 
+	if (is_support_sw_smu(adev)) {
 		smu_set_fan_control_mode(&adev->smu, value);
 	} else {
 		if (!adev->powerplay.pp_funcs->set_fan_control_mode)
 			return -EINVAL;
 
-		err = kstrtoint(buf, 10, &value);
-		if (err)
-			return err;
-
 		amdgpu_dpm_set_fan_control_mode(adev, value);
 	}
 
@@ -1725,7 +1742,7 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
 		return -EINVAL;
 
 	if (is_support_sw_smu(adev)) {
-		err = smu_get_current_rpm(&adev->smu, &speed);
+		err = smu_get_fan_speed_rpm(&adev->smu, &speed);
 		if (err)
 			return err;
 	} else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
@@ -1785,7 +1802,7 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
 		return -EINVAL;
 
 	if (is_support_sw_smu(adev)) {
-		err = smu_get_current_rpm(&adev->smu, &rpm);
+		err = smu_get_fan_speed_rpm(&adev->smu, &rpm);
 		if (err)
 			return err;
 	} else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
@@ -1998,7 +2015,7 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
 	uint32_t limit = 0;
 
 	if (is_support_sw_smu(adev)) {
-		smu_get_power_limit(&adev->smu, &limit, true);
+		smu_get_power_limit(&adev->smu, &limit, true, true);
 		return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
 	} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
 		adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
@@ -2016,7 +2033,7 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
 	uint32_t limit = 0;
 
 	if (is_support_sw_smu(adev)) {
-		smu_get_power_limit(&adev->smu, &limit, false);
+		smu_get_power_limit(&adev->smu, &limit, false, true);
 		return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
 	} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
 		adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
@@ -2041,16 +2058,18 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
 		return err;
 
 	value = value / 1000000; /* convert to Watt */
+
 	if (is_support_sw_smu(adev)) {
-		adev->smu.funcs->set_power_limit(&adev->smu, value);
+		err = smu_set_power_limit(&adev->smu, value);
 	} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) {
 		err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
-		if (err)
-			return err;
 	} else {
-		return -EINVAL;
+		err = -EINVAL;
 	}
 
+	if (err)
+		return err;
+
 	return count;
 }
 
@@ -2068,11 +2087,6 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
 	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
 		return -EINVAL;
 
-	/* sanity check PP is enabled */
-	if (!(adev->powerplay.pp_funcs &&
-	      adev->powerplay.pp_funcs->read_sensor))
-		return -EINVAL;
-
 	/* get the sclk */
 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
 				   (void *)&sclk, &size);
@@ -2103,11 +2117,6 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
 	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
 		return -EINVAL;
 
-	/* sanity check PP is enabled */
-	if (!(adev->powerplay.pp_funcs &&
-	      adev->powerplay.pp_funcs->read_sensor))
-		return -EINVAL;
-
 	/* get the sclk */
 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
 				   (void *)&mclk, &size);
@@ -2192,9 +2201,9 @@ static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
 *
 * - fan1_input: fan speed in RPM
 *
- * - fan[1-*]_target: Desired fan speed Unit: revolution/min (RPM)
+ * - fan[1-\*]_target: Desired fan speed Unit: revolution/min (RPM)
 *
- * - fan[1-*]_enable: Enable or disable the sensors.1: Enable 0: Disable
+ * - fan[1-\*]_enable: Enable or disable the sensors.1: Enable 0: Disable
 *
 * hwmon interfaces for GPU clocks:
 *
@@ -2345,7 +2354,9 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
 		effective_mode &= ~S_IWUSR;
 	}
 
-	if ((adev->flags & AMD_IS_APU) &&
+	if (((adev->flags & AMD_IS_APU) ||
+	     adev->family == AMDGPU_FAMILY_SI ||	/* not implemented yet */
+	     adev->family == AMDGPU_FAMILY_KV) &&	/* not implemented yet */
 	    (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
 	     attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
 	     attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
@@ -2369,6 +2380,12 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
 		return 0;
 	}
 
+	if ((adev->family == AMDGPU_FAMILY_SI ||	/* not implemented yet */
+	     adev->family == AMDGPU_FAMILY_KV) &&	/* not implemented yet */
+	    (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
+	     attr == &sensor_dev_attr_in0_label.dev_attr.attr))
+		return 0;
+
 	/* only APUs have vddnb */
 	if (!(adev->flags & AMD_IS_APU) &&
 	    (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
@@ -2701,6 +2718,44 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
 
 }
 
+int amdgpu_pm_virt_sysfs_init(struct amdgpu_device *adev)
+{
+	int ret = 0;
+
+	if (!(amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev)))
+		return ret;
+
+	ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
+	if (ret) {
+		DRM_ERROR("failed to create device file pp_dpm_sclk\n");
+		return ret;
+	}
+
+	ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
+	if (ret) {
+		DRM_ERROR("failed to create device file pp_dpm_mclk\n");
+		return ret;
+	}
+
+	ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
+	if (ret) {
+		DRM_ERROR("failed to create device file for dpm state\n");
+		return ret;
+	}
+
+	return ret;
+}
+
+void amdgpu_pm_virt_sysfs_fini(struct amdgpu_device *adev)
+{
+	if (!(amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev)))
+		return;
+
+	device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
+	device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
+	device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
+}
+
 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
 {
 	int r;
@@ -2775,6 +2830,19 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 		DRM_ERROR("failed to create device file pp_dpm_sclk\n");
 		return ret;
 	}
+
+	/* Arcturus does not support standalone mclk/socclk/fclk level setting */
+	if (adev->asic_type == CHIP_ARCTURUS) {
+		dev_attr_pp_dpm_mclk.attr.mode &= ~S_IWUGO;
+		dev_attr_pp_dpm_mclk.store = NULL;
+
+		dev_attr_pp_dpm_socclk.attr.mode &= ~S_IWUGO;
+		dev_attr_pp_dpm_socclk.store = NULL;
+
+		dev_attr_pp_dpm_fclk.attr.mode &= ~S_IWUGO;
+		dev_attr_pp_dpm_fclk.store = NULL;
+	}
+
 	ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
 	if (ret) {
 		DRM_ERROR("failed to create device file pp_dpm_mclk\n");
@@ -2786,10 +2854,12 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 			DRM_ERROR("failed to create device file pp_dpm_socclk\n");
 			return ret;
 		}
-		ret = device_create_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
-		if (ret) {
-			DRM_ERROR("failed to create device file pp_dpm_dcefclk\n");
-			return ret;
+		if (adev->asic_type != CHIP_ARCTURUS) {
+			ret = device_create_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
+			if (ret) {
+				DRM_ERROR("failed to create device file pp_dpm_dcefclk\n");
+				return ret;
+			}
 		}
 	}
 	if (adev->asic_type >= CHIP_VEGA20) {
@@ -2799,10 +2869,12 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 			return ret;
 		}
 	}
-	ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
-	if (ret) {
-		DRM_ERROR("failed to create device file pp_dpm_pcie\n");
-		return ret;
+	if (adev->asic_type != CHIP_ARCTURUS) {
+		ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
+		if (ret) {
+			DRM_ERROR("failed to create device file pp_dpm_pcie\n");
+			return ret;
+		}
 	}
 	ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od);
 	if (ret) {
@@ -2839,7 +2911,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 		return ret;
 	}
 	/* APU does not have its own dedicated memory */
-	if (!(adev->flags & AMD_IS_APU)) {
+	if (!(adev->flags & AMD_IS_APU) &&
+	    (adev->asic_type != CHIP_VEGA10)) {
 		ret = device_create_file(adev->dev,
 				&dev_attr_mem_busy_percent);
 		if (ret) {
@@ -2871,10 +2944,10 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 	if ((adev->asic_type >= CHIP_VEGA10) &&
 	    !(adev->flags & AMD_IS_APU)) {
 		ret = device_create_file(adev->dev,
-				&dev_attr_ppfeatures);
+				&dev_attr_pp_features);
 		if (ret) {
 			DRM_ERROR("failed to create device file "
-					"ppfeatures\n");
+					"pp_features\n");
 			return ret;
 		}
 	}
@@ -2905,9 +2978,11 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
 	device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
 	if (adev->asic_type >= CHIP_VEGA10) {
 		device_remove_file(adev->dev, &dev_attr_pp_dpm_socclk);
-		device_remove_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
+		if (adev->asic_type != CHIP_ARCTURUS)
+			device_remove_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
 	}
-	device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
+	if (adev->asic_type != CHIP_ARCTURUS)
+		device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
 	if (adev->asic_type >= CHIP_VEGA20)
 		device_remove_file(adev->dev, &dev_attr_pp_dpm_fclk);
 	device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
@@ -2919,7 +2994,8 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
 	device_remove_file(adev->dev, &dev_attr_pp_od_clk_voltage);
 	device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
-	if (!(adev->flags & AMD_IS_APU))
+	if (!(adev->flags & AMD_IS_APU) &&
+	    (adev->asic_type != CHIP_VEGA10))
 		device_remove_file(adev->dev, &dev_attr_mem_busy_percent);
 	if (!(adev->flags & AMD_IS_APU))
 		device_remove_file(adev->dev, &dev_attr_pcie_bw);
@@ -2927,7 +3003,7 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
 		device_remove_file(adev->dev, &dev_attr_unique_id);
 	if ((adev->asic_type >= CHIP_VEGA10) &&
 	    !(adev->flags & AMD_IS_APU))
-		device_remove_file(adev->dev, &dev_attr_ppfeatures);
+		device_remove_file(adev->dev, &dev_attr_pp_features);
 }
 
 void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
@@ -2947,13 +3023,11 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
 	}
 
 	if (is_support_sw_smu(adev)) {
-		struct smu_context *smu = &adev->smu;
 		struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
-		mutex_lock(&(smu->mutex));
 		smu_handle_task(&adev->smu,
 				smu_dpm->dpm_level,
-				AMD_PP_TASK_DISPLAY_CONFIG_CHANGE);
-		mutex_unlock(&(smu->mutex));
+				AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
+				true);
 	} else {
 		if (adev->powerplay.pp_funcs->dispatch_tasks) {
 			if (!amdgpu_device_has_dc_support(adev)) {
@@ -3031,28 +3105,44 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
 		seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
 
-	/* UVD clocks */
-	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
-		if (!value) {
-			seq_printf(m, "UVD: Disabled\n");
-		} else {
-			seq_printf(m, "UVD: Enabled\n");
-			if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
-				seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
-			if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
-				seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
+	if (adev->asic_type > CHIP_VEGA20) {
+		/* VCN clocks */
+		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
+			if (!value) {
+				seq_printf(m, "VCN: Disabled\n");
+			} else {
+				seq_printf(m, "VCN: Enabled\n");
+				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
+					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
+				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
+					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
+			}
 		}
-	}
-	seq_printf(m, "\n");
+		seq_printf(m, "\n");
+	} else {
+		/* UVD clocks */
+		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
+			if (!value) {
+				seq_printf(m, "UVD: Disabled\n");
+			} else {
+				seq_printf(m, "UVD: Enabled\n");
+				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
+					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
+				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
+					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
+			}
+		}
+		seq_printf(m, "\n");
 
-	/* VCE clocks */
-	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
-		if (!value) {
-			seq_printf(m, "VCE: Disabled\n");
-		} else {
-			seq_printf(m, "VCE: Enabled\n");
-			if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
-				seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
+		/* VCE clocks */
+		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
+			if (!value) {
+				seq_printf(m, "VCE: Disabled\n");
+			} else {
+				seq_printf(m, "VCE: Enabled\n");
+				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
+					seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
+			}
 		}
 	}
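For reference, the sysfs files touched by this patch are exercised from user space roughly as follows. This is a usage sketch only, not part of the patch; the card0 device path and the level/mask values are illustrative assumptions for a single-GPU system:

    # Select manual performance level control, then restrict sclk to levels
    # 4-6, as documented in the pp_dpm_* DOC comment in the patch.
    echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
    echo "4 5 6" > /sys/class/drm/card0/device/pp_dpm_sclk

    # Read the powerplay feature mask through the renamed pp_features file
    # (formerly ppfeatures); writing a hex mask back changes enabled features.
    cat /sys/class/drm/card0/device/pp_features

Note that with this patch Arcturus keeps pp_dpm_mclk/socclk/fclk readable but clears their store callbacks, so writes to those files are rejected on that ASIC, and pp_dpm_dcefclk and pp_dpm_pcie are not created there at all.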