Diffstat (limited to 'drivers/cpufreq/intel_pstate.c')
-rw-r--r--	drivers/cpufreq/intel_pstate.c	173
1 file changed, 66 insertions, 107 deletions
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index dbbf299f4219..4b986c044741 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -173,7 +173,6 @@ struct vid_data {
  *			based on the MSR_IA32_MISC_ENABLE value and whether or
  *			not the maximum reported turbo P-state is different from
  *			the maximum reported non-turbo one.
- * @turbo_disabled_mf:	The @turbo_disabled value reflected by cpuinfo.max_freq.
  * @min_perf_pct:	Minimum capacity limit in percent of the maximum turbo
  *			P-state capacity.
  * @max_perf_pct:	Maximum capacity limit in percent of the maximum turbo
@@ -182,7 +181,6 @@ struct vid_data {
 struct global_params {
 	bool no_turbo;
 	bool turbo_disabled;
-	bool turbo_disabled_mf;
 	int max_perf_pct;
 	int min_perf_pct;
 };
@@ -213,7 +211,7 @@ struct global_params {
  * @epp_policy:		Last saved policy used to set EPP/EPB
  * @epp_default:	Power on default HWP energy performance
  *			preference/bias
- * @epp_cached		Cached HWP energy-performance preference value
+ * @epp_cached:		Cached HWP energy-performance preference value
  * @hwp_req_cached:	Cached value of the last HWP Request MSR
  * @hwp_cap_cached:	Cached value of the last HWP Capabilities MSR
  * @last_io_update:	Last time when IO wake flag was set
@@ -292,11 +290,11 @@ struct pstate_funcs {
 
 static struct pstate_funcs pstate_funcs __read_mostly;
 
-static int hwp_active __read_mostly;
-static int hwp_mode_bdw __read_mostly;
-static bool per_cpu_limits __read_mostly;
+static bool hwp_active __ro_after_init;
+static int hwp_mode_bdw __ro_after_init;
+static bool per_cpu_limits __ro_after_init;
+static bool hwp_forced __ro_after_init;
 static bool hwp_boost __read_mostly;
-static bool hwp_forced __read_mostly;
 
 static struct cpufreq_driver *intel_pstate_driver __read_mostly;
 
@@ -594,12 +592,13 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
 	cpu->pstate.min_pstate = intel_pstate_freq_to_hwp(cpu, freq);
 }
 
-static inline void update_turbo_state(void)
+static bool turbo_is_disabled(void)
 {
 	u64 misc_en;
 
 	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
-	global.turbo_disabled = misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
+
+	return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
 }
 
 static int min_perf_pct_min(void)
@@ -1154,12 +1153,15 @@ static void intel_pstate_update_policies(void)
 static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
 					   struct cpufreq_policy *policy)
 {
-	policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
+	intel_pstate_get_hwp_cap(cpudata);
+
+	policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ?
 			cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
+
 	refresh_frequency_limits(policy);
 }
 
-static void intel_pstate_update_max_freq(unsigned int cpu)
+static void intel_pstate_update_limits(unsigned int cpu)
 {
 	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
 
@@ -1171,25 +1173,12 @@ static void intel_pstate_update_max_freq(unsigned int cpu)
 	cpufreq_cpu_release(policy);
 }
 
-static void intel_pstate_update_limits(unsigned int cpu)
+static void intel_pstate_update_limits_for_all(void)
 {
-	mutex_lock(&intel_pstate_driver_lock);
-
-	update_turbo_state();
-	/*
-	 * If turbo has been turned on or off globally, policy limits for
-	 * all CPUs need to be updated to reflect that.
-	 */
-	if (global.turbo_disabled_mf != global.turbo_disabled) {
-		global.turbo_disabled_mf = global.turbo_disabled;
-		arch_set_max_freq_ratio(global.turbo_disabled);
-		for_each_possible_cpu(cpu)
-			intel_pstate_update_max_freq(cpu);
-	} else {
-		cpufreq_update_policy(cpu);
-	}
+	int cpu;
 
-	mutex_unlock(&intel_pstate_driver_lock);
+	for_each_possible_cpu(cpu)
+		intel_pstate_update_limits(cpu);
 }
 
 /************************** sysfs begin ************************/
@@ -1287,11 +1276,7 @@ static ssize_t show_no_turbo(struct kobject *kobj,
 		return -EAGAIN;
 	}
 
-	update_turbo_state();
-	if (global.turbo_disabled)
-		ret = sprintf(buf, "%u\n", global.turbo_disabled);
-	else
-		ret = sprintf(buf, "%u\n", global.no_turbo);
+	ret = sprintf(buf, "%u\n", global.no_turbo);
 
 	mutex_unlock(&intel_pstate_driver_lock);
 
@@ -1302,32 +1287,34 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
 			      const char *buf, size_t count)
 {
 	unsigned int input;
-	int ret;
+	bool no_turbo;
 
-	ret = sscanf(buf, "%u", &input);
-	if (ret != 1)
+	if (sscanf(buf, "%u", &input) != 1)
 		return -EINVAL;
 
 	mutex_lock(&intel_pstate_driver_lock);
 
 	if (!intel_pstate_driver) {
-		mutex_unlock(&intel_pstate_driver_lock);
-		return -EAGAIN;
+		count = -EAGAIN;
+		goto unlock_driver;
 	}
 
-	mutex_lock(&intel_pstate_limits_lock);
+	no_turbo = !!clamp_t(int, input, 0, 1);
+
+	if (no_turbo == global.no_turbo)
+		goto unlock_driver;
 
-	update_turbo_state();
 	if (global.turbo_disabled) {
 		pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
-		mutex_unlock(&intel_pstate_limits_lock);
-		mutex_unlock(&intel_pstate_driver_lock);
-		return -EPERM;
+		count = -EPERM;
+		goto unlock_driver;
 	}
 
-	global.no_turbo = clamp_t(int, input, 0, 1);
+	WRITE_ONCE(global.no_turbo, no_turbo);
 
-	if (global.no_turbo) {
+	mutex_lock(&intel_pstate_limits_lock);
+
+	if (no_turbo) {
 		struct cpudata *cpu = all_cpu_data[0];
 		int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate;
 
@@ -1338,9 +1325,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
 
 	mutex_unlock(&intel_pstate_limits_lock);
 
-	intel_pstate_update_policies();
-	arch_set_max_freq_ratio(global.no_turbo);
+	intel_pstate_update_limits_for_all();
+	arch_set_max_freq_ratio(no_turbo);
 
+unlock_driver:
 	mutex_unlock(&intel_pstate_driver_lock);
 
 	return count;
@@ -1621,7 +1609,6 @@ static void intel_pstate_notify_work(struct work_struct *work)
 	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpudata->cpu);
 
 	if (policy) {
-		intel_pstate_get_hwp_cap(cpudata);
 		__intel_pstate_update_max_freq(cpudata, policy);
 
 		cpufreq_cpu_release(policy);
@@ -1636,11 +1623,10 @@ static cpumask_t hwp_intr_enable_mask;
 void notify_hwp_interrupt(void)
 {
 	unsigned int this_cpu = smp_processor_id();
-	struct cpudata *cpudata;
 	unsigned long flags;
 	u64 value;
 
-	if (!READ_ONCE(hwp_active) || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
+	if (!hwp_active || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
 		return;
 
 	rdmsrl_safe(MSR_HWP_STATUS, &value);
@@ -1652,24 +1638,8 @@ void notify_hwp_interrupt(void)
 	if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
 		goto ack_intr;
 
-	/*
-	 * Currently we never free all_cpu_data. And we can't reach here
-	 * without this allocated. But for safety for future changes, added
-	 * check.
-	 */
-	if (unlikely(!READ_ONCE(all_cpu_data)))
-		goto ack_intr;
-
-	/*
-	 * The free is done during cleanup, when cpufreq registry is failed.
-	 * We wouldn't be here if it fails on init or switch status. But for
-	 * future changes, added check.
-	 */
-	cpudata = READ_ONCE(all_cpu_data[this_cpu]);
-	if (unlikely(!cpudata))
-		goto ack_intr;
-
-	schedule_delayed_work(&cpudata->hwp_notify_work, msecs_to_jiffies(10));
+	schedule_delayed_work(&all_cpu_data[this_cpu]->hwp_notify_work,
+			      msecs_to_jiffies(10));
 
 	spin_unlock_irqrestore(&hwp_notify_lock, flags);
 
@@ -1682,7 +1652,7 @@ ack_intr:
 
 static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
 {
-	unsigned long flags;
+	bool cancel_work;
 
 	if (!boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
 		return;
@@ -1690,22 +1660,22 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
 	/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
 	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
 
-	spin_lock_irqsave(&hwp_notify_lock, flags);
-	if (cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask))
-		cancel_delayed_work(&cpudata->hwp_notify_work);
-	spin_unlock_irqrestore(&hwp_notify_lock, flags);
+	spin_lock_irq(&hwp_notify_lock);
+	cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask);
+	spin_unlock_irq(&hwp_notify_lock);
+
+	if (cancel_work)
+		cancel_delayed_work_sync(&cpudata->hwp_notify_work);
 }
 
 static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
 {
 	/* Enable HWP notification interrupt for guaranteed performance change */
 	if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&hwp_notify_lock, flags);
+		spin_lock_irq(&hwp_notify_lock);
 		INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
 		cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
-		spin_unlock_irqrestore(&hwp_notify_lock, flags);
+		spin_unlock_irq(&hwp_notify_lock);
 
 		/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
 		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01);
@@ -1791,7 +1761,7 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate)
 	u32 vid;
 
 	val = (u64)pstate << 8;
-	if (global.no_turbo && !global.turbo_disabled)
+	if (READ_ONCE(global.no_turbo) && !global.turbo_disabled)
 		val |= (u64)1 << 32;
 
 	vid_fp = cpudata->vid.min + mul_fp(
@@ -1956,7 +1926,7 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
 	u64 val;
 
 	val = (u64)pstate << 8;
-	if (global.no_turbo && !global.turbo_disabled)
+	if (READ_ONCE(global.no_turbo) && !global.turbo_disabled)
 		val |= (u64)1 << 32;
 
 	return val;
@@ -2029,14 +1999,6 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
 	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
 }
 
-static void intel_pstate_max_within_limits(struct cpudata *cpu)
-{
-	int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
-
-	update_turbo_state();
-	intel_pstate_set_pstate(cpu, pstate);
-}
-
 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 {
 	int perf_ctl_max_phys = pstate_funcs.get_max_physical(cpu->cpu);
@@ -2262,7 +2224,7 @@ static inline int32_t get_target_pstate(struct cpudata *cpu)
 
 	sample->busy_scaled = busy_frac * 100;
 
-	target = global.no_turbo || global.turbo_disabled ?
+	target = READ_ONCE(global.no_turbo) ?
 			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
 	target += target >> 2;
 	target = mul_fp(target, busy_frac);
@@ -2306,8 +2268,6 @@ static void intel_pstate_adjust_pstate(struct cpudata *cpu)
 	struct sample *sample;
 	int target_pstate;
 
-	update_turbo_state();
-
 	target_pstate = get_target_pstate(cpu);
 	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
 	trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu);
@@ -2437,6 +2397,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
 
+#ifdef CONFIG_ACPI
 static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
 	X86_MATCH(BROADWELL_D,		core_funcs),
 	X86_MATCH(BROADWELL_X,		core_funcs),
@@ -2445,6 +2406,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
 	X86_MATCH(SAPPHIRERAPIDS_X,	core_funcs),
 	{}
 };
+#endif
 
 static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
 	X86_MATCH(KABYLAKE,		core_funcs),
@@ -2526,7 +2488,7 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu)
 
 static int intel_pstate_get_max_freq(struct cpudata *cpu)
 {
-	return global.turbo_disabled || global.no_turbo ?
+	return READ_ONCE(global.no_turbo) ?
 			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
 }
 
@@ -2611,12 +2573,14 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 	intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
 
 	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
+		int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
+
 		/*
 		 * NOHZ_FULL CPUs need this as the governor callback may not
 		 * be invoked on them.
 		 */
 		intel_pstate_clear_update_util_hook(policy->cpu);
-		intel_pstate_max_within_limits(cpu);
+		intel_pstate_set_pstate(cpu, pstate);
 	} else {
 		intel_pstate_set_update_util_hook(policy->cpu);
 	}
@@ -2659,10 +2623,9 @@ static void intel_pstate_verify_cpu_policy(struct cpudata *cpu,
 {
 	int max_freq;
 
-	update_turbo_state();
 	if (hwp_active) {
 		intel_pstate_get_hwp_cap(cpu);
-		max_freq = global.no_turbo || global.turbo_disabled ?
+		max_freq = READ_ONCE(global.no_turbo) ?
 				cpu->pstate.max_freq : cpu->pstate.turbo_freq;
 	} else {
 		max_freq = intel_pstate_get_max_freq(cpu);
@@ -2756,9 +2719,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
 
 	/* cpuinfo and default policy values */
 	policy->cpuinfo.min_freq = cpu->pstate.min_freq;
-	update_turbo_state();
-	global.turbo_disabled_mf = global.turbo_disabled;
-	policy->cpuinfo.max_freq = global.turbo_disabled ?
+	policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ?
 			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
 
 	policy->min = policy->cpuinfo.min_freq;
@@ -2923,8 +2884,6 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
 	struct cpufreq_freqs freqs;
 	int target_pstate;
 
-	update_turbo_state();
-
 	freqs.old = policy->cur;
 	freqs.new = target_freq;
 
@@ -2946,8 +2905,6 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
 	struct cpudata *cpu = all_cpu_data[policy->cpu];
 	int target_pstate;
 
-	update_turbo_state();
-
 	target_pstate = intel_pstate_freq_to_hwp(cpu, target_freq);
 	target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);
 
@@ -2965,9 +2922,9 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum,
 	int old_pstate = cpu->pstate.current_pstate;
 	int cap_pstate, min_pstate, max_pstate, target_pstate;
 
-	update_turbo_state();
-	cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) :
-					     HWP_HIGHEST_PERF(hwp_cap);
+	cap_pstate = READ_ONCE(global.no_turbo) ?
+					HWP_GUARANTEED_PERF(hwp_cap) :
+					HWP_HIGHEST_PERF(hwp_cap);
 
 	/* Optimization: Avoid unnecessary divisions. */
 
@@ -3135,10 +3092,8 @@ static void intel_pstate_driver_cleanup(void)
 			if (intel_pstate_driver == &intel_pstate)
 				intel_pstate_clear_update_util_hook(cpu);
 
-			spin_lock(&hwp_notify_lock);
 			kfree(all_cpu_data[cpu]);
 			WRITE_ONCE(all_cpu_data[cpu], NULL);
-			spin_unlock(&hwp_notify_lock);
 		}
 	}
 	cpus_read_unlock();
@@ -3155,6 +3110,10 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)
 
 	memset(&global, 0, sizeof(global));
 	global.max_perf_pct = 100;
+	global.turbo_disabled = turbo_is_disabled();
+	global.no_turbo = global.turbo_disabled;
+
+	arch_set_max_freq_ratio(global.turbo_disabled);
 
 	intel_pstate_driver = driver;
 	ret = cpufreq_register_driver(intel_pstate_driver);
@@ -3466,7 +3425,7 @@ static int __init intel_pstate_init(void)
 		 * deal with it.
 		 */
 		if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
-			WRITE_ONCE(hwp_active, 1);
+			hwp_active = true;
 			hwp_mode_bdw = id->driver_data;
 			intel_pstate.attr = hwp_cpufreq_attrs;
 			intel_cpufreq.attr = hwp_cpufreq_attrs;
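
The recurring change in this diff is the removal of update_turbo_state() from hot paths: store_no_turbo() now publishes global.no_turbo with WRITE_ONCE() while holding intel_pstate_driver_lock, and lockless readers such as get_target_pstate() fetch it with READ_ONCE(). Below is a minimal userspace sketch of that publish/read pattern, not part of the patch; the simplified READ_ONCE()/WRITE_ONCE() stand-ins and the set_no_turbo()/effective_max_pstate() helpers are illustrative assumptions.

/* Build with: gcc -pthread no_turbo_sketch.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Simplified stand-ins for the kernel macros: a volatile access keeps the
 * compiler from tearing, fusing, or speculatively re-reading the flag.
 */
#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))

static struct {
	bool no_turbo;       /* read locklessly by hot paths */
	bool turbo_disabled; /* set once at driver registration */
} global;

/* Stands in for intel_pstate_driver_lock: serializes writers only. */
static pthread_mutex_t driver_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Writer side, mirroring the ordering in the reworked store_no_turbo():
 * bail out early on a no-op, refuse if firmware disabled turbo, otherwise
 * publish the new value with a single WRITE_ONCE() under the lock.
 */
static int set_no_turbo(bool no_turbo)
{
	int ret = 0;

	pthread_mutex_lock(&driver_lock);
	if (no_turbo == global.no_turbo)
		goto unlock;            /* nothing to do */

	if (global.turbo_disabled)
		ret = -1;               /* turbo locked out by firmware */
	else
		WRITE_ONCE(global.no_turbo, no_turbo);
unlock:
	pthread_mutex_unlock(&driver_lock);
	return ret;
}

/*
 * Reader side, mirroring get_target_pstate(): no lock taken, a single
 * READ_ONCE() decides which capacity ceiling applies.
 */
static int effective_max_pstate(int max_pstate, int turbo_pstate)
{
	return READ_ONCE(global.no_turbo) ? max_pstate : turbo_pstate;
}

int main(void)
{
	set_no_turbo(true);
	printf("cap = %d\n", effective_max_pstate(28, 45)); /* prints 28 */
	return 0;
}

The design point the sketch captures is why the patch can drop update_turbo_state() from the fast paths: turbo_disabled is sampled once at registration, so readers only need an untorn load of no_turbo, and mutual exclusion is confined to the slow sysfs writer.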