author | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2024-09-11 18:22:23 +0200
committer | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2024-09-11 18:22:23 +0200
commit | 9bcf30348f327658c93894fca6392e147f4383a5 (patch)
tree | 7b0ba8a0fa74e2dc64460dc367adf7d370319826 /drivers/cpufreq/amd-pstate.c
parent | 6af3aab6c7cfdd0daabd9f00e32e0ad46a6d176e (diff)
parent | 93497752dfed196b41d2804503e80b9a04318adb (diff)
Merge tag 'amd-pstate-v6.12-2024-09-11' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/superm1/linux
Merge the second round of amd-pstate changes for 6.12 from Mario
Limonciello:
"* Move the calculation of the AMD boost numerator outside of
amd-pstate, correcting acpi-cpufreq on systems with preferred cores
* Harden preferred core detection to avoid potential false positives
* Add extra unit test coverage for mode state machine"
* tag 'amd-pstate-v6.12-2024-09-11' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/superm1/linux:
cpufreq/amd-pstate-ut: Fix an "Uninitialized variables" issue
cpufreq/amd-pstate-ut: Add test case for mode switches
cpufreq/amd-pstate: Export symbols for changing modes
amd-pstate: Add missing documentation for `amd_pstate_prefcore_ranking`
cpufreq: amd-pstate: Add documentation for `amd_pstate_hw_prefcore`
cpufreq: amd-pstate: Optimize amd_pstate_update_limits()
cpufreq: amd-pstate: Merge amd_pstate_highest_perf_set() into amd_get_boost_ratio_numerator()
x86/amd: Detect preferred cores in amd_get_boost_ratio_numerator()
x86/amd: Move amd_get_highest_perf() out of amd-pstate
ACPI: CPPC: Adjust debug messages in amd_set_max_freq_ratio() to warn
ACPI: CPPC: Drop check for non zero perf ratio
x86/amd: Rename amd_get_highest_perf() to amd_get_boost_ratio_numerator()
ACPI: CPPC: Adjust return code for inline functions in !CONFIG_ACPI_CPPC_LIB
x86/amd: Move amd_get_highest_perf() from amd.c to cppc.c
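The central change in this series is that the per-family highest-perf quirks move behind amd_get_boost_ratio_numerator(), which amd-pstate (and, per the merge description, acpi-cpufreq) consume instead of carrying their own tables. As a rough illustration of the consumer side, here is a minimal sketch mirroring the amd_pstate_init_freq() hunk in the diff below; the wrapper function, its parameters, and the header locations are assumptions, not code from this merge:

```c
#include <linux/math64.h>		/* div_u64() */
#include <linux/sched/topology.h>	/* SCHED_CAPACITY_SHIFT (assumed header) */
#include <asm/processor.h>		/* amd_get_boost_ratio_numerator() (assumed header) */

/*
 * Hypothetical helper: derive the boosted max frequency (kHz) for @cpu from
 * its nominal frequency/perf, using the shared boost-ratio numerator rather
 * than a driver-local highest-perf constant.
 */
static int example_boosted_max_freq_khz(unsigned int cpu, u32 nominal_freq_mhz,
					u32 nominal_perf, u32 *max_freq_khz)
{
	u64 numerator;
	u32 boost_ratio;
	int ret;

	/* Family/model quirks now live behind this single helper. */
	ret = amd_get_boost_ratio_numerator(cpu, &numerator);
	if (ret)
		return ret;

	boost_ratio = div_u64(numerator << SCHED_CAPACITY_SHIFT, nominal_perf);
	*max_freq_khz = (nominal_freq_mhz * boost_ratio >> SCHED_CAPACITY_SHIFT) * 1000;

	return 0;
}
```

Because acpi-cpufreq goes through the same helper, its boost calculation on preferred-core systems is corrected as well, per the merge description.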
Diffstat (limited to 'drivers/cpufreq/amd-pstate.c')
-rw-r--r-- | drivers/cpufreq/amd-pstate.c | 151
1 file changed, 39 insertions, 112 deletions
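The "harden preferred core detection" item boils down to two steps visible in the diff below: amd_pstate_init() asks the platform once, via amd_detect_prefcore(), whether cores are really ranked, and amd_pstate_init_prefcore() then feeds each CPU's own highest_perf value to ITMT. A condensed sketch of that flow follows; the surrounding function and variable names are simplified stand-ins, not the driver's actual code, and the header locations are assumptions:

```c
#include <asm/topology.h>	/* sched_set_itmt_core_prio() (assumed header) */
#include "amd-pstate.h"		/* struct amd_cpudata (assumed driver-local header) */

static bool prefcore_enabled = true;	/* stands in for the amd_pstate_prefcore knob */

/* Driver-init step: confirm the platform actually ranks its cores. */
static int example_detect_prefcore(void)
{
	if (!prefcore_enabled)
		return 0;

	/* Clears prefcore_enabled if no ranking is reported. */
	return amd_detect_prefcore(&prefcore_enabled);
}

/* Per-CPU step: hand the CPU's own highest_perf to ITMT as its priority. */
static void example_init_prefcore(struct amd_cpudata *cpudata)
{
	if (!prefcore_enabled)	/* user disabled or not detected */
		return;

	cpudata->hw_prefcore = true;
	sched_set_itmt_core_prio((int)READ_ONCE(cpudata->highest_perf),
				 cpudata->cpu);
}
```

Driving the priority from the CPU's own highest_perf, rather than probing a separate register and comparing against a magic threshold, is what removes the false-positive path the old code had.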
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 46b4d0078b39..15e201d5e911 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -52,26 +52,12 @@
 #define AMD_PSTATE_TRANSITION_LATENCY	20000
 #define AMD_PSTATE_TRANSITION_DELAY	1000
 #define AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY 600
-#define CPPC_HIGHEST_PERF_PERFORMANCE	196
-#define CPPC_HIGHEST_PERF_DEFAULT	166
 
 #define AMD_CPPC_EPP_PERFORMANCE	0x00
 #define AMD_CPPC_EPP_BALANCE_PERFORMANCE	0x80
 #define AMD_CPPC_EPP_BALANCE_POWERSAVE	0xBF
 #define AMD_CPPC_EPP_POWERSAVE	0xFF
 
-/*
- * enum amd_pstate_mode - driver working mode of amd pstate
- */
-enum amd_pstate_mode {
-	AMD_PSTATE_UNDEFINED = 0,
-	AMD_PSTATE_DISABLE,
-	AMD_PSTATE_PASSIVE,
-	AMD_PSTATE_ACTIVE,
-	AMD_PSTATE_GUIDED,
-	AMD_PSTATE_MAX,
-};
-
 static const char * const amd_pstate_mode_string[] = {
 	[AMD_PSTATE_UNDEFINED]   = "undefined",
 	[AMD_PSTATE_DISABLE]     = "disable",
@@ -81,6 +67,14 @@ static const char * const amd_pstate_mode_string[] = {
 	NULL,
 };
 
+const char *amd_pstate_get_mode_string(enum amd_pstate_mode mode)
+{
+	if (mode < 0 || mode >= AMD_PSTATE_MAX)
+		return NULL;
+	return amd_pstate_mode_string[mode];
+}
+EXPORT_SYMBOL_GPL(amd_pstate_get_mode_string);
+
 struct quirk_entry {
 	u32 nominal_freq;
 	u32 lowest_freq;
@@ -372,43 +366,17 @@ static inline int amd_pstate_enable(bool enable)
 	return static_call(amd_pstate_enable)(enable);
 }
 
-static u32 amd_pstate_highest_perf_set(struct amd_cpudata *cpudata)
-{
-	struct cpuinfo_x86 *c = &cpu_data(0);
-
-	/*
-	 * For AMD CPUs with Family ID 19H and Model ID range 0x70 to 0x7f,
-	 * the highest performance level is set to 196.
-	 * https://bugzilla.kernel.org/show_bug.cgi?id=218759
-	 */
-	if (c->x86 == 0x19 && (c->x86_model >= 0x70 && c->x86_model <= 0x7f))
-		return CPPC_HIGHEST_PERF_PERFORMANCE;
-
-	return CPPC_HIGHEST_PERF_DEFAULT;
-}
-
 static int pstate_init_perf(struct amd_cpudata *cpudata)
 {
 	u64 cap1;
-	u32 highest_perf;
 
 	int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
 				     &cap1);
 	if (ret)
 		return ret;
 
-	/* For platforms that do not support the preferred core feature, the
-	 * highest_pef may be configured with 166 or 255, to avoid max frequency
-	 * calculated wrongly. we take the AMD_CPPC_HIGHEST_PERF(cap1) value as
-	 * the default max perf.
-	 */
-	if (cpudata->hw_prefcore)
-		highest_perf = amd_pstate_highest_perf_set(cpudata);
-	else
-		highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
-
-	WRITE_ONCE(cpudata->highest_perf, highest_perf);
-	WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
+	WRITE_ONCE(cpudata->highest_perf, AMD_CPPC_HIGHEST_PERF(cap1));
+	WRITE_ONCE(cpudata->max_limit_perf, AMD_CPPC_HIGHEST_PERF(cap1));
 	WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
 	WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
 	WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
@@ -420,19 +388,13 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
 static int cppc_init_perf(struct amd_cpudata *cpudata)
 {
 	struct cppc_perf_caps cppc_perf;
-	u32 highest_perf;
 
 	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
 	if (ret)
 		return ret;
 
-	if (cpudata->hw_prefcore)
-		highest_perf = amd_pstate_highest_perf_set(cpudata);
-	else
-		highest_perf = cppc_perf.highest_perf;
-
-	WRITE_ONCE(cpudata->highest_perf, highest_perf);
-	WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
+	WRITE_ONCE(cpudata->highest_perf, cppc_perf.highest_perf);
+	WRITE_ONCE(cpudata->max_limit_perf, cppc_perf.highest_perf);
 	WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
 	WRITE_ONCE(cpudata->lowest_nonlinear_perf,
 		   cppc_perf.lowest_nonlinear_perf);
@@ -811,66 +773,22 @@ static void amd_pstste_sched_prefcore_workfn(struct work_struct *work)
 }
 static DECLARE_WORK(sched_prefcore_work, amd_pstste_sched_prefcore_workfn);
 
-/*
- * Get the highest performance register value.
- * @cpu: CPU from which to get highest performance.
- * @highest_perf: Return address.
- *
- * Return: 0 for success, -EIO otherwise.
- */
-static int amd_pstate_get_highest_perf(int cpu, u32 *highest_perf)
-{
-	int ret;
-
-	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
-		u64 cap1;
-
-		ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
-		if (ret)
-			return ret;
-		WRITE_ONCE(*highest_perf, AMD_CPPC_HIGHEST_PERF(cap1));
-	} else {
-		u64 cppc_highest_perf;
-
-		ret = cppc_get_highest_perf(cpu, &cppc_highest_perf);
-		if (ret)
-			return ret;
-		WRITE_ONCE(*highest_perf, cppc_highest_perf);
-	}
-
-	return (ret);
-}
-
 #define CPPC_MAX_PERF	U8_MAX
 
 static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
 {
-	int ret, prio;
-	u32 highest_perf;
-
-	ret = amd_pstate_get_highest_perf(cpudata->cpu, &highest_perf);
-	if (ret)
+	/* user disabled or not detected */
+	if (!amd_pstate_prefcore)
 		return;
 
 	cpudata->hw_prefcore = true;
 
-	/* check if CPPC preferred core feature is enabled*/
-	if (highest_perf < CPPC_MAX_PERF)
-		prio = (int)highest_perf;
-	else {
-		pr_debug("AMD CPPC preferred core is unsupported!\n");
-		cpudata->hw_prefcore = false;
-		return;
-	}
-
-	if (!amd_pstate_prefcore)
-		return;
-
 	/*
 	 * The priorities can be set regardless of whether or not
 	 * sched_set_itmt_support(true) has been called and it is valid to
 	 * update them at any time after it has been called.
 	 */
-	sched_set_itmt_core_prio(prio, cpudata->cpu);
+	sched_set_itmt_core_prio((int)READ_ONCE(cpudata->highest_perf), cpudata->cpu);
 
 	schedule_work(&sched_prefcore_work);
 }
@@ -888,17 +806,17 @@ static void amd_pstate_update_limits(unsigned int cpu)
 
 	cpudata = policy->driver_data;
 
-	mutex_lock(&amd_pstate_driver_lock);
-	if ((!amd_pstate_prefcore) || (!cpudata->hw_prefcore))
-		goto free_cpufreq_put;
+	if (!amd_pstate_prefcore)
+		return;
 
-	ret = amd_pstate_get_highest_perf(cpu, &cur_high);
+	mutex_lock(&amd_pstate_driver_lock);
+	ret = amd_get_highest_perf(cpu, &cur_high);
 	if (ret)
 		goto free_cpufreq_put;
 
 	prev_high = READ_ONCE(cpudata->prefcore_ranking);
-	if (prev_high != cur_high) {
-		highest_perf_changed = true;
+	highest_perf_changed = (prev_high != cur_high);
+	if (highest_perf_changed) {
 		WRITE_ONCE(cpudata->prefcore_ranking, cur_high);
 
 		if (cur_high < CPPC_MAX_PERF)
@@ -962,8 +880,8 @@ static u32 amd_pstate_get_transition_latency(unsigned int cpu)
 static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
 {
 	int ret;
-	u32 min_freq;
-	u32 highest_perf, max_freq;
+	u32 min_freq, max_freq;
+	u64 numerator;
 	u32 nominal_perf, nominal_freq;
 	u32 lowest_nonlinear_perf, lowest_nonlinear_freq;
 	u32 boost_ratio, lowest_nonlinear_ratio;
@@ -985,8 +903,10 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
 
 	nominal_perf = READ_ONCE(cpudata->nominal_perf);
 
-	highest_perf = READ_ONCE(cpudata->highest_perf);
-	boost_ratio = div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
+	ret = amd_get_boost_ratio_numerator(cpudata->cpu, &numerator);
+	if (ret)
+		return ret;
+	boost_ratio = div_u64(numerator << SCHED_CAPACITY_SHIFT, nominal_perf);
 	max_freq = (nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
 
 	lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
@@ -1041,12 +961,12 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
 
 	cpudata->cpu = policy->cpu;
 
-	amd_pstate_init_prefcore(cpudata);
-
 	ret = amd_pstate_init_perf(cpudata);
 	if (ret)
 		goto free_cpudata1;
 
+	amd_pstate_init_prefcore(cpudata);
+
 	ret = amd_pstate_init_freq(cpudata);
 	if (ret)
 		goto free_cpudata1;
@@ -1362,7 +1282,7 @@ static ssize_t amd_pstate_show_status(char *buf)
 	return sysfs_emit(buf, "%s\n", amd_pstate_mode_string[cppc_state]);
 }
 
-static int amd_pstate_update_status(const char *buf, size_t size)
+int amd_pstate_update_status(const char *buf, size_t size)
 {
 	int mode_idx;
 
@@ -1379,6 +1299,7 @@ static int amd_pstate_update_status(const char *buf, size_t size)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(amd_pstate_update_status);
 
 static ssize_t status_show(struct device *dev,
 			   struct device_attribute *attr, char *buf)
@@ -1496,12 +1417,12 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
 	cpudata->cpu = policy->cpu;
 	cpudata->epp_policy = 0;
 
-	amd_pstate_init_prefcore(cpudata);
-
 	ret = amd_pstate_init_perf(cpudata);
 	if (ret)
 		goto free_cpudata1;
 
+	amd_pstate_init_prefcore(cpudata);
+
 	ret = amd_pstate_init_freq(cpudata);
 	if (ret)
 		goto free_cpudata1;
@@ -1963,6 +1884,12 @@ static int __init amd_pstate_init(void)
 		static_call_update(amd_pstate_update_perf, cppc_update_perf);
 	}
 
+	if (amd_pstate_prefcore) {
+		ret = amd_detect_prefcore(&amd_pstate_prefcore);
+		if (ret)
+			return ret;
+	}
+
 	/* enable amd pstate feature */
 	ret = amd_pstate_enable(true);
 	if (ret) {
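The two exports added above, amd_pstate_get_mode_string() and amd_pstate_update_status(), are what let the amd-pstate-ut module drive mode switches from outside the core driver. Below is a minimal sketch of how kernel code might cycle through the modes using those helpers; the loop, messages, and header location are illustrative assumptions, not the actual unit-test code:

```c
#include <linux/printk.h>
#include <linux/string.h>
#include "amd-pstate.h"	/* enum amd_pstate_mode and the exported helpers (assumed header) */

/* Walk every defined mode and ask the driver to switch to it by name. */
static void example_cycle_modes(void)
{
	enum amd_pstate_mode mode;

	for (mode = AMD_PSTATE_DISABLE; mode < AMD_PSTATE_MAX; mode++) {
		const char *name = amd_pstate_get_mode_string(mode);
		int ret;

		if (!name)	/* out-of-range or unnamed mode */
			continue;

		ret = amd_pstate_update_status(name, strlen(name));
		pr_info("amd-pstate: switch to %s returned %d\n", name, ret);
	}
}
```

Exposing the existing sysfs "status" write path as a symbol, rather than duplicating the mode state machine in the test module, keeps the unit test exercising the same transitions a user would trigger through /sys/devices/system/cpu/amd_pstate/status.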