Diffstat (limited to 'drivers/cpufreq/intel_pstate.c')
-rw-r--r-- | drivers/cpufreq/intel_pstate.c | 178
1 file changed, 119 insertions, 59 deletions
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index dd66decf2087..34b54df41aaa 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -50,6 +50,8 @@
 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
 #define fp_toint(X) ((X) >> FRAC_BITS)
 
+#define ONE_EIGHTH_FP ((int64_t)1 << (FRAC_BITS - 3))
+
 #define EXT_BITS 6
 #define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
 #define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
@@ -177,6 +179,7 @@ struct vid_data {
  *			based on the MSR_IA32_MISC_ENABLE value and whether or
  *			not the maximum reported turbo P-state is different from
  *			the maximum reported non-turbo one.
+ * @turbo_disabled_mf:	The @turbo_disabled value reflected by cpuinfo.max_freq.
  * @min_perf_pct:	Minimum capacity limit in percent of the maximum turbo
  *			P-state capacity.
  * @max_perf_pct:	Maximum capacity limit in percent of the maximum turbo
@@ -185,6 +188,7 @@ struct vid_data {
 struct global_params {
 	bool no_turbo;
 	bool turbo_disabled;
+	bool turbo_disabled_mf;
 	int max_perf_pct;
 	int min_perf_pct;
 };
@@ -383,7 +387,10 @@ static int intel_pstate_get_cppc_guranteed(int cpu)
 	if (ret)
 		return ret;
 
-	return cppc_perf.guaranteed_perf;
+	if (cppc_perf.guaranteed_perf)
+		return cppc_perf.guaranteed_perf;
+
+	return cppc_perf.nominal_perf;
 }
 
 #else /* CONFIG_ACPI_CPPC_LIB */
@@ -520,7 +527,7 @@ static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
 	u64 epb;
 	int ret;
 
-	if (!static_cpu_has(X86_FEATURE_EPB))
+	if (!boot_cpu_has(X86_FEATURE_EPB))
 		return -ENXIO;
 
 	ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
@@ -534,7 +541,7 @@ static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
 {
 	s16 epp;
 
-	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
 		/*
 		 * When hwp_req_data is 0, means that caller didn't read
 		 * MSR_HWP_REQUEST, so need to read and get EPP.
@@ -559,7 +566,7 @@ static int intel_pstate_set_epb(int cpu, s16 pref)
 	u64 epb;
 	int ret;
 
-	if (!static_cpu_has(X86_FEATURE_EPB))
+	if (!boot_cpu_has(X86_FEATURE_EPB))
 		return -ENXIO;
 
 	ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
@@ -607,7 +614,7 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
 	if (epp < 0)
 		return epp;
 
-	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
 		if (epp == HWP_EPP_PERFORMANCE)
 			return 1;
 		if (epp <= HWP_EPP_BALANCE_PERFORMANCE)
@@ -616,7 +623,7 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
 			return 3;
 		else
 			return 4;
-	} else if (static_cpu_has(X86_FEATURE_EPB)) {
+	} else if (boot_cpu_has(X86_FEATURE_EPB)) {
 		/*
 		 * Range:
 		 *	0x00-0x03	:	Performance
@@ -644,7 +651,7 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
 
 	mutex_lock(&intel_pstate_limits_lock);
 
-	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
 		u64 value;
 
 		ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value);
@@ -819,7 +826,7 @@ static void intel_pstate_hwp_set(unsigned int cpu)
 			epp = cpu_data->epp_powersave;
 	}
 update_epp:
-	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
 		value &= ~GENMASK_ULL(31, 24);
 		value |= (u64)epp << 24;
 	} else {
@@ -844,7 +851,7 @@ static void intel_pstate_hwp_force_min_perf(int cpu)
 	value |= HWP_MIN_PERF(min_perf);
 
 	/* Set EPP/EPB to min */
-	if (static_cpu_has(X86_FEATURE_HWP_EPP))
+	if (boot_cpu_has(X86_FEATURE_HWP_EPP))
 		value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
 	else
 		intel_pstate_set_epb(cpu, HWP_EPP_BALANCE_POWERSAVE);
@@ -892,10 +899,52 @@ static void intel_pstate_update_policies(void)
 		cpufreq_update_policy(cpu);
 }
 
+static void intel_pstate_update_max_freq(unsigned int cpu)
+{
+	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
+	struct cpufreq_policy new_policy;
+	struct cpudata *cpudata;
+
+	if (!policy)
+		return;
+
+	cpudata = all_cpu_data[cpu];
+	policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
+			cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
+
+	memcpy(&new_policy, policy, sizeof(*policy));
+	new_policy.max = min(policy->user_policy.max, policy->cpuinfo.max_freq);
+	new_policy.min = min(policy->user_policy.min, new_policy.max);
+
+	cpufreq_set_policy(policy, &new_policy);
+
+	cpufreq_cpu_release(policy);
+}
+
+static void intel_pstate_update_limits(unsigned int cpu)
+{
+	mutex_lock(&intel_pstate_driver_lock);
+
+	update_turbo_state();
+	/*
+	 * If turbo has been turned on or off globally, policy limits for
+	 * all CPUs need to be updated to reflect that.
+	 */
+	if (global.turbo_disabled_mf != global.turbo_disabled) {
+		global.turbo_disabled_mf = global.turbo_disabled;
+		for_each_possible_cpu(cpu)
+			intel_pstate_update_max_freq(cpu);
+	} else {
+		cpufreq_update_policy(cpu);
+	}
+
+	mutex_unlock(&intel_pstate_driver_lock);
+}
+
 /************************** sysfs begin ************************/
 #define show_one(file_name, object)					\
 	static ssize_t show_##file_name					\
-	(struct kobject *kobj, struct attribute *attr, char *buf)	\
+	(struct kobject *kobj, struct kobj_attribute *attr, char *buf)	\
 	{								\
 		return sprintf(buf, "%u\n", global.object);		\
 	}
@@ -904,7 +953,7 @@ static ssize_t intel_pstate_show_status(char *buf);
 static int intel_pstate_update_status(const char *buf, size_t size);
 
 static ssize_t show_status(struct kobject *kobj,
-			   struct attribute *attr, char *buf)
+			   struct kobj_attribute *attr, char *buf)
 {
 	ssize_t ret;
 
@@ -915,7 +964,7 @@ static ssize_t show_status(struct kobject *kobj,
 	return ret;
 }
 
-static ssize_t store_status(struct kobject *a, struct attribute *b,
+static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
 			    const char *buf, size_t count)
 {
 	char *p = memchr(buf, '\n', count);
@@ -929,7 +978,7 @@ static ssize_t store_status(struct kobject *a, struct attribute *b,
 }
 
 static ssize_t show_turbo_pct(struct kobject *kobj,
-				struct attribute *attr, char *buf)
+				struct kobj_attribute *attr, char *buf)
 {
 	struct cpudata *cpu;
 	int total, no_turbo, turbo_pct;
@@ -955,7 +1004,7 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
 }
 
 static ssize_t show_num_pstates(struct kobject *kobj,
-				struct attribute *attr, char *buf)
+				struct kobj_attribute *attr, char *buf)
 {
 	struct cpudata *cpu;
 	int total;
@@ -976,7 +1025,7 @@ static ssize_t show_num_pstates(struct kobject *kobj,
 }
 
 static ssize_t show_no_turbo(struct kobject *kobj,
-			     struct attribute *attr, char *buf)
+			     struct kobj_attribute *attr, char *buf)
 {
 	ssize_t ret;
 
@@ -998,7 +1047,7 @@ static ssize_t show_no_turbo(struct kobject *kobj,
 	return ret;
 }
 
-static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
+static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
 			      const char *buf, size_t count)
 {
 	unsigned int input;
@@ -1045,7 +1094,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
 	return count;
 }
 
-static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
+static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
 				  const char *buf, size_t count)
 {
 	unsigned int input;
@@ -1075,7 +1124,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
 	return count;
 }
 
-static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
+static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
 				  const char *buf, size_t count)
 {
 	unsigned int input;
@@ -1107,12 +1156,13 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
 }
 
 static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
-				struct attribute *attr, char *buf)
+				struct kobj_attribute *attr, char *buf)
 {
 	return sprintf(buf, "%u\n", hwp_boost);
 }
 
-static ssize_t store_hwp_dynamic_boost(struct kobject *a, struct attribute *b,
+static ssize_t store_hwp_dynamic_boost(struct kobject *a,
+				       struct kobj_attribute *b,
 				       const char *buf, size_t count)
 {
 	unsigned int input;
@@ -1191,7 +1241,7 @@ static void __init intel_pstate_sysfs_expose_params(void)
 static void intel_pstate_hwp_enable(struct cpudata *cpudata)
 {
 	/* First disable HWP notification interrupt as we don't process them */
-	if (static_cpu_has(X86_FEATURE_HWP_NOTIFY))
+	if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
 		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
 
 	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
@@ -1444,12 +1494,6 @@ static int knl_get_turbo_pstate(void)
 	return ret;
 }
 
-static int intel_pstate_get_base_pstate(struct cpudata *cpu)
-{
-	return global.no_turbo || global.turbo_disabled ?
-			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
-}
-
 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
 {
 	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
@@ -1470,11 +1514,9 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
 
 static void intel_pstate_max_within_limits(struct cpudata *cpu)
 {
-	int pstate;
+	int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
 
 	update_turbo_state();
-	pstate = intel_pstate_get_base_pstate(cpu);
-	pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
 	intel_pstate_set_pstate(cpu, pstate);
 }
 
@@ -1678,17 +1720,14 @@ static inline int32_t get_avg_pstate(struct cpudata *cpu)
 static inline int32_t get_target_pstate(struct cpudata *cpu)
 {
 	struct sample *sample = &cpu->sample;
-	int32_t busy_frac, boost;
+	int32_t busy_frac;
 	int target, avg_pstate;
 
 	busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift,
 			   sample->tsc);
 
-	boost = cpu->iowait_boost;
-	cpu->iowait_boost >>= 1;
-
-	if (busy_frac < boost)
-		busy_frac = boost;
+	if (busy_frac < cpu->iowait_boost)
+		busy_frac = cpu->iowait_boost;
 
 	sample->busy_scaled = busy_frac * 100;
 
@@ -1715,11 +1754,9 @@ static inline int32_t get_target_pstate(struct cpudata *cpu)
 
 static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
 {
-	int max_pstate = intel_pstate_get_base_pstate(cpu);
-	int min_pstate;
+	int min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio);
+	int max_pstate = max(min_pstate, cpu->max_perf_ratio);
 
-	min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio);
-	max_pstate = max(min_pstate, cpu->max_perf_ratio);
 	return clamp_t(int, pstate, min_pstate, max_pstate);
 }
 
@@ -1767,29 +1804,30 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
 	if (smp_processor_id() != cpu->cpu)
 		return;
 
+	delta_ns = time - cpu->last_update;
 	if (flags & SCHED_CPUFREQ_IOWAIT) {
-		cpu->iowait_boost = int_tofp(1);
-		cpu->last_update = time;
-		/*
-		 * The last time the busy was 100% so P-state was max anyway
-		 * so avoid overhead of computation.
-		 */
-		if (fp_toint(cpu->sample.busy_scaled) == 100)
-			return;
-
-		goto set_pstate;
+		/* Start over if the CPU may have been idle. */
+		if (delta_ns > TICK_NSEC) {
+			cpu->iowait_boost = ONE_EIGHTH_FP;
+		} else if (cpu->iowait_boost >= ONE_EIGHTH_FP) {
+			cpu->iowait_boost <<= 1;
+			if (cpu->iowait_boost > int_tofp(1))
+				cpu->iowait_boost = int_tofp(1);
+		} else {
+			cpu->iowait_boost = ONE_EIGHTH_FP;
+		}
 	} else if (cpu->iowait_boost) {
 		/* Clear iowait_boost if the CPU may have been idle. */
-		delta_ns = time - cpu->last_update;
 		if (delta_ns > TICK_NSEC)
 			cpu->iowait_boost = 0;
+		else
+			cpu->iowait_boost >>= 1;
 	}
 	cpu->last_update = time;
 	delta_ns = time - cpu->sample.time;
 	if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL)
 		return;
 
-set_pstate:
 	if (intel_pstate_sample(cpu, time))
 		intel_pstate_adjust_pstate(cpu);
 }
@@ -1976,7 +2014,8 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
 	if (hwp_active) {
 		intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state);
 	} else {
-		max_state = intel_pstate_get_base_pstate(cpu);
+		max_state = global.no_turbo || global.turbo_disabled ?
+			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
 		turbo_max = cpu->pstate.turbo_pstate;
 	}
 
@@ -2143,6 +2182,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
 	/* cpuinfo and default policy values */
 	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
 	update_turbo_state();
+	global.turbo_disabled_mf = global.turbo_disabled;
 	policy->cpuinfo.max_freq = global.turbo_disabled ?
 			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
 	policy->cpuinfo.max_freq *= cpu->pstate.scaling;
@@ -2187,6 +2227,7 @@ static struct cpufreq_driver intel_pstate = {
 	.init		= intel_pstate_cpu_init,
 	.exit		= intel_pstate_cpu_exit,
 	.stop_cpu	= intel_pstate_stop_cpu,
+	.update_limits	= intel_pstate_update_limits,
 	.name		= "intel_pstate",
 };
 
@@ -2321,6 +2362,7 @@ static struct cpufreq_driver intel_cpufreq = {
 	.init		= intel_cpufreq_cpu_init,
 	.exit		= intel_pstate_cpu_exit,
 	.stop_cpu	= intel_cpufreq_stop_cpu,
+	.update_limits	= intel_pstate_update_limits,
 	.name		= "intel_cpufreq",
 };
 
@@ -2475,6 +2517,7 @@ static bool __init intel_pstate_no_acpi_pss(void)
 		kfree(pss);
 	}
 
+	pr_debug("ACPI _PSS not found\n");
 	return true;
 }
 
@@ -2485,9 +2528,14 @@ static bool __init intel_pstate_no_acpi_pcch(void)
 
 	status = acpi_get_handle(NULL, "\\_SB", &handle);
 	if (ACPI_FAILURE(status))
-		return true;
+		goto not_found;
+
+	if (acpi_has_method(handle, "PCCH"))
+		return false;
 
-	return !acpi_has_method(handle, "PCCH");
+not_found:
+	pr_debug("ACPI PCCH not found\n");
+	return true;
 }
 
 static bool __init intel_pstate_has_acpi_ppc(void)
@@ -2502,6 +2550,7 @@ static bool __init intel_pstate_has_acpi_ppc(void)
 		if (acpi_has_method(pr->handle, "_PPC"))
 			return true;
 	}
+	pr_debug("ACPI _PPC not found\n");
 	return false;
 }
 
@@ -2539,8 +2588,10 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
 	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
 	if (id) {
 		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
-		if ( misc_pwr & (1 << 8))
+		if (misc_pwr & (1 << 8)) {
+			pr_debug("Bit 8 in the MISC_PWR_MGMT MSR set\n");
 			return true;
+		}
 	}
 
 	idx = acpi_match_platform_list(plat_info);
@@ -2592,6 +2643,9 @@ static int __init intel_pstate_init(void)
 	const struct x86_cpu_id *id;
 	int rc;
 
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+		return -ENODEV;
+
 	if (no_load)
 		return -ENODEV;
 
@@ -2606,22 +2660,28 @@ static int __init intel_pstate_init(void)
 		}
 	} else {
 		id = x86_match_cpu(intel_pstate_cpu_ids);
-		if (!id)
+		if (!id) {
+			pr_info("CPU model not supported\n");
 			return -ENODEV;
+		}
 
 		copy_cpu_funcs((struct pstate_funcs *)id->driver_data);
 	}
 
-	if (intel_pstate_msrs_not_valid())
+	if (intel_pstate_msrs_not_valid()) {
+		pr_info("Invalid MSRs\n");
 		return -ENODEV;
+	}
 
 hwp_cpu_matched:
 	/*
 	 * The Intel pstate driver will be ignored if the platform
 	 * firmware has its own power management modes.
	 */
-	if (intel_pstate_platform_pwr_mgmt_exists())
+	if (intel_pstate_platform_pwr_mgmt_exists()) {
+		pr_info("P-states controlled by the platform\n");
 		return -ENODEV;
+	}
 
 	if (!hwp_active && hwp_only)
 		return -ENOTSUPP;
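A note on the io-wait boosting rework in the intel_pstate_update_util() hunk above: instead of jumping straight to int_tofp(1) (100% busy) on every io-wait wakeup, the boost now starts at ONE_EIGHTH_FP, doubles on each io-wait wakeup arriving within one tick (capped at int_tofp(1)), is halved on ordinary updates, and is dropped entirely after an idle gap longer than TICK_NSEC. The standalone userspace sketch below models that ramp. FRAC_BITS = 8 matches the driver, but the TICK_NSEC value is hard-coded here for illustration (in the kernel it depends on CONFIG_HZ), and struct model_cpu is a hypothetical stand-in for the relevant struct cpudata fields; this is a simplified model, not the in-kernel code.

/*
 * Minimal model of the intel_pstate io-wait boost ramp.
 * Build: cc -o boost_model boost_model.c
 */
#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS	8
#define int_tofp(X)	((int64_t)(X) << FRAC_BITS)
#define ONE_EIGHTH_FP	((int64_t)1 << (FRAC_BITS - 3))
#define TICK_NSEC	4000000ULL	/* 4 ms tick (HZ=250); an assumption */

struct model_cpu {
	int64_t iowait_boost;	/* fixed-point fraction of full utilization */
	uint64_t last_update;	/* timestamp of the previous update (ns) */
};

/* One scheduler-callback update, mirroring the logic added above. */
static void update_boost(struct model_cpu *cpu, uint64_t time, int iowait)
{
	uint64_t delta_ns = time - cpu->last_update;

	if (iowait) {
		if (delta_ns > TICK_NSEC) {
			/* The CPU may have been idle: start over at 1/8. */
			cpu->iowait_boost = ONE_EIGHTH_FP;
		} else if (cpu->iowait_boost >= ONE_EIGHTH_FP) {
			/* Back-to-back wakeups: double, capped at 100%. */
			cpu->iowait_boost <<= 1;
			if (cpu->iowait_boost > int_tofp(1))
				cpu->iowait_boost = int_tofp(1);
		} else {
			cpu->iowait_boost = ONE_EIGHTH_FP;
		}
	} else if (cpu->iowait_boost) {
		if (delta_ns > TICK_NSEC)
			cpu->iowait_boost = 0;		/* idle gap: drop it */
		else
			cpu->iowait_boost >>= 1;	/* otherwise decay */
	}
	cpu->last_update = time;
}

int main(void)
{
	struct model_cpu cpu = { 0, 0 };
	uint64_t t = 0;
	int i;

	/* Five io-wait wakeups 1 ms apart: prints 32, 64, 128, 256, 256. */
	for (i = 0; i < 5; i++) {
		t += 1000000;
		update_boost(&cpu, t, 1);
		printf("boost = %lld/256\n", (long long)cpu.iowait_boost);
	}
	return 0;
}

With FRAC_BITS = 8, int_tofp(1) is 256 and ONE_EIGHTH_FP is 32, so the boost ramps through 1/8, 1/4, 1/2 and only then saturates at 1 over successive wakeups, rather than pinning busy_frac to 100% immediately; that gradual ramp is what makes the reworked boosting less aggressive.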