Diffstat (limited to 'drivers/cpufreq/amd-pstate.c')
-rw-r--r--  drivers/cpufreq/amd-pstate.c  245
1 file changed, 175 insertions, 70 deletions
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 73c7643b2697..ddd346a239e0 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -63,7 +63,6 @@ static struct cpufreq_driver *current_pstate_driver;
 static struct cpufreq_driver amd_pstate_driver;
 static struct cpufreq_driver amd_pstate_epp_driver;
 static int cppc_state = AMD_PSTATE_DISABLE;
-struct kobject *amd_pstate_kobj;
 
 /*
  * AMD Energy Preference Performance (EPP)
@@ -106,6 +105,8 @@ static unsigned int epp_values[] = {
 	[EPP_INDEX_POWERSAVE] = AMD_CPPC_EPP_POWERSAVE,
 };
 
+typedef int (*cppc_mode_transition_fn)(int);
+
 static inline int get_mode_idx_from_str(const char *str, size_t size)
 {
 	int i;
@@ -308,7 +309,22 @@ static int cppc_init_perf(struct amd_cpudata *cpudata)
 		   cppc_perf.lowest_nonlinear_perf);
 	WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);
 
-	return 0;
+	if (cppc_state == AMD_PSTATE_ACTIVE)
+		return 0;
+
+	ret = cppc_get_auto_sel_caps(cpudata->cpu, &cppc_perf);
+	if (ret) {
+		pr_warn("failed to get auto_sel, ret: %d\n", ret);
+		return 0;
+	}
+
+	ret = cppc_set_auto_sel(cpudata->cpu,
+			(cppc_state == AMD_PSTATE_PASSIVE) ? 0 : 1);
+
+	if (ret)
+		pr_warn("failed to set auto_sel, ret: %d\n", ret);
+
+	return ret;
 }
 
 DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf);
@@ -385,12 +401,18 @@ static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
 }
 
 static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
-			      u32 des_perf, u32 max_perf, bool fast_switch)
+			      u32 des_perf, u32 max_perf, bool fast_switch, int gov_flags)
 {
 	u64 prev = READ_ONCE(cpudata->cppc_req_cached);
 	u64 value = prev;
 
 	des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
+
+	if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) {
+		min_perf = des_perf;
+		des_perf = 0;
+	}
+
 	value &= ~AMD_CPPC_MIN_PERF(~0L);
 	value |= AMD_CPPC_MIN_PERF(min_perf);
 
@@ -422,9 +444,8 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy)
 	return 0;
 }
 
-static int amd_pstate_target(struct cpufreq_policy *policy,
-			     unsigned int target_freq,
-			     unsigned int relation)
+static int amd_pstate_update_freq(struct cpufreq_policy *policy,
+				  unsigned int target_freq, bool fast_switch)
 {
 	struct cpufreq_freqs freqs;
 	struct amd_cpudata *cpudata = policy->driver_data;
@@ -443,26 +464,51 @@ static int amd_pstate_target(struct cpufreq_policy *policy,
 	des_perf = DIV_ROUND_CLOSEST(target_freq * cap_perf,
 				     cpudata->max_freq);
 
-	cpufreq_freq_transition_begin(policy, &freqs);
+	WARN_ON(fast_switch && !policy->fast_switch_enabled);
+	/*
+	 * If fast_switch is desired, then there aren't any registered
+	 * transition notifiers. See comment for
+	 * cpufreq_enable_fast_switch().
+	 */
+	if (!fast_switch)
+		cpufreq_freq_transition_begin(policy, &freqs);
+
 	amd_pstate_update(cpudata, min_perf, des_perf,
-			  max_perf, false);
-	cpufreq_freq_transition_end(policy, &freqs, false);
+			max_perf, fast_switch, policy->governor->flags);
+
+	if (!fast_switch)
+		cpufreq_freq_transition_end(policy, &freqs, false);
 
 	return 0;
 }
 
+static int amd_pstate_target(struct cpufreq_policy *policy,
+			     unsigned int target_freq,
+			     unsigned int relation)
+{
+	return amd_pstate_update_freq(policy, target_freq, false);
+}
+
+static unsigned int amd_pstate_fast_switch(struct cpufreq_policy *policy,
+				  unsigned int target_freq)
+{
+	return amd_pstate_update_freq(policy, target_freq, true);
+}
+
 static void amd_pstate_adjust_perf(unsigned int cpu,
 				   unsigned long _min_perf,
 				   unsigned long target_perf,
 				   unsigned long capacity)
 {
 	unsigned long max_perf, min_perf, des_perf,
-		      cap_perf, lowest_nonlinear_perf;
+		      cap_perf, lowest_nonlinear_perf, max_freq;
 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 	struct amd_cpudata *cpudata = policy->driver_data;
+	unsigned int target_freq;
 
 	cap_perf = READ_ONCE(cpudata->highest_perf);
 	lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
+	max_freq = READ_ONCE(cpudata->max_freq);
 
 	des_perf = cap_perf;
 	if (target_perf < capacity)
@@ -479,7 +525,12 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
 	if (max_perf < min_perf)
 		max_perf = min_perf;
 
-	amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true);
+	des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
+	target_freq = div_u64(des_perf * max_freq, max_perf);
+	policy->cur = target_freq;
+
+	amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true,
+			policy->governor->flags);
 	cpufreq_cpu_put(policy);
 }
 
@@ -692,6 +743,7 @@ static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
 
 	freq_qos_remove_request(&cpudata->req[1]);
 	freq_qos_remove_request(&cpudata->req[0]);
+	policy->fast_switch_possible = false;
 	kfree(cpudata);
 
 	return 0;
@@ -816,6 +868,98 @@ static ssize_t show_energy_performance_preference(
 	return sysfs_emit(buf, "%s\n", energy_perf_strings[preference]);
 }
 
+static void amd_pstate_driver_cleanup(void)
+{
+	amd_pstate_enable(false);
+	cppc_state = AMD_PSTATE_DISABLE;
+	current_pstate_driver = NULL;
+}
+
+static int amd_pstate_register_driver(int mode)
+{
+	int ret;
+
+	if (mode == AMD_PSTATE_PASSIVE || mode == AMD_PSTATE_GUIDED)
+		current_pstate_driver = &amd_pstate_driver;
+	else if (mode == AMD_PSTATE_ACTIVE)
+		current_pstate_driver = &amd_pstate_epp_driver;
+	else
+		return -EINVAL;
+
+	cppc_state = mode;
+	ret = cpufreq_register_driver(current_pstate_driver);
+	if (ret) {
+		amd_pstate_driver_cleanup();
+		return ret;
+	}
+	return 0;
+}
+
+static int amd_pstate_unregister_driver(int dummy)
+{
+	cpufreq_unregister_driver(current_pstate_driver);
+	amd_pstate_driver_cleanup();
+	return 0;
+}
+
+static int amd_pstate_change_mode_without_dvr_change(int mode)
+{
+	int cpu = 0;
+
+	cppc_state = mode;
+
+	if (boot_cpu_has(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE)
+		return 0;
+
+	for_each_present_cpu(cpu) {
+		cppc_set_auto_sel(cpu, (cppc_state == AMD_PSTATE_PASSIVE) ? 0 : 1);
+	}
+
+	return 0;
+}
+
+static int amd_pstate_change_driver_mode(int mode)
+{
+	int ret;
+
+	ret = amd_pstate_unregister_driver(0);
+	if (ret)
+		return ret;
+
+	ret = amd_pstate_register_driver(mode);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static cppc_mode_transition_fn mode_state_machine[AMD_PSTATE_MAX][AMD_PSTATE_MAX] = {
+	[AMD_PSTATE_DISABLE]         = {
+		[AMD_PSTATE_DISABLE]     = NULL,
+		[AMD_PSTATE_PASSIVE]     = amd_pstate_register_driver,
+		[AMD_PSTATE_ACTIVE]      = amd_pstate_register_driver,
+		[AMD_PSTATE_GUIDED]      = amd_pstate_register_driver,
+	},
+	[AMD_PSTATE_PASSIVE]         = {
+		[AMD_PSTATE_DISABLE]     = amd_pstate_unregister_driver,
+		[AMD_PSTATE_PASSIVE]     = NULL,
+		[AMD_PSTATE_ACTIVE]      = amd_pstate_change_driver_mode,
+		[AMD_PSTATE_GUIDED]      = amd_pstate_change_mode_without_dvr_change,
+	},
+	[AMD_PSTATE_ACTIVE]          = {
+		[AMD_PSTATE_DISABLE]     = amd_pstate_unregister_driver,
+		[AMD_PSTATE_PASSIVE]     = amd_pstate_change_driver_mode,
+		[AMD_PSTATE_ACTIVE]      = NULL,
+		[AMD_PSTATE_GUIDED]      = amd_pstate_change_driver_mode,
+	},
+	[AMD_PSTATE_GUIDED]          = {
+		[AMD_PSTATE_DISABLE]     = amd_pstate_unregister_driver,
+		[AMD_PSTATE_PASSIVE]     = amd_pstate_change_mode_without_dvr_change,
+		[AMD_PSTATE_ACTIVE]      = amd_pstate_change_driver_mode,
+		[AMD_PSTATE_GUIDED]      = NULL,
+	},
+};
+
 static ssize_t amd_pstate_show_status(char *buf)
 {
 	if (!current_pstate_driver)
@@ -824,57 +968,22 @@ static ssize_t amd_pstate_show_status(char *buf)
 	return sysfs_emit(buf, "%s\n", amd_pstate_mode_string[cppc_state]);
 }
 
-static void amd_pstate_driver_cleanup(void)
-{
-	current_pstate_driver = NULL;
-}
-
 static int amd_pstate_update_status(const char *buf, size_t size)
 {
-	int ret = 0;
 	int mode_idx;
 
-	if (size > 7 || size < 6)
+	if (size > strlen("passive") || size < strlen("active"))
 		return -EINVAL;
-	mode_idx = get_mode_idx_from_str(buf, size);
 
-	switch(mode_idx) {
-	case AMD_PSTATE_DISABLE:
-		if (!current_pstate_driver)
-			return -EINVAL;
-		if (cppc_state == AMD_PSTATE_ACTIVE)
-			return -EBUSY;
-		cpufreq_unregister_driver(current_pstate_driver);
-		amd_pstate_driver_cleanup();
-		break;
-	case AMD_PSTATE_PASSIVE:
-		if (current_pstate_driver) {
-			if (current_pstate_driver == &amd_pstate_driver)
-				return 0;
-			cpufreq_unregister_driver(current_pstate_driver);
-			cppc_state = AMD_PSTATE_PASSIVE;
-			current_pstate_driver = &amd_pstate_driver;
-		}
+	mode_idx = get_mode_idx_from_str(buf, size);
 
-		ret = cpufreq_register_driver(current_pstate_driver);
-		break;
-	case AMD_PSTATE_ACTIVE:
-		if (current_pstate_driver) {
-			if (current_pstate_driver == &amd_pstate_epp_driver)
-				return 0;
-			cpufreq_unregister_driver(current_pstate_driver);
-			current_pstate_driver = &amd_pstate_epp_driver;
-			cppc_state = AMD_PSTATE_ACTIVE;
-		}
+	if (mode_idx < 0 || mode_idx >= AMD_PSTATE_MAX)
+		return -EINVAL;
 
-		ret = cpufreq_register_driver(current_pstate_driver);
-		break;
-	default:
-		ret = -EINVAL;
-		break;
-	}
+	if (mode_state_machine[cppc_state][mode_idx])
+		return mode_state_machine[cppc_state][mode_idx](mode_idx);
 
-	return ret;
+	return 0;
 }
 
 static ssize_t show_status(struct kobject *kobj,
@@ -932,6 +1041,7 @@ static struct attribute *pstate_global_attributes[] = {
 };
 
 static const struct attribute_group amd_pstate_global_attr_group = {
+	.name = "amd_pstate",
 	.attrs = pstate_global_attributes,
 };
 
@@ -998,7 +1108,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
 	policy->policy = CPUFREQ_POLICY_POWERSAVE;
 
 	if (boot_cpu_has(X86_FEATURE_CPPC)) {
-		policy->fast_switch_possible = true;
 		ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
 		if (ret)
 			return ret;
@@ -1021,7 +1130,6 @@ free_cpudata1:
 static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
 {
 	pr_debug("CPU %d exiting\n", policy->cpu);
-	policy->fast_switch_possible = false;
 	return 0;
 }
 
@@ -1228,6 +1336,7 @@ static struct cpufreq_driver amd_pstate_driver = {
 	.flags		= CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS,
 	.verify		= amd_pstate_verify,
 	.target		= amd_pstate_target,
+	.fast_switch    = amd_pstate_fast_switch,
 	.init		= amd_pstate_cpu_init,
 	.exit		= amd_pstate_cpu_exit,
 	.suspend	= amd_pstate_cpu_suspend,
@@ -1253,6 +1362,7 @@ static struct cpufreq_driver amd_pstate_epp_driver = {
 
 static int __init amd_pstate_init(void)
 {
+	struct device *dev_root;
 	int ret;
 
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
@@ -1279,7 +1389,7 @@ static int __init amd_pstate_init(void)
 	/* capability check */
 	if (boot_cpu_has(X86_FEATURE_CPPC)) {
 		pr_debug("AMD CPPC MSR based functionality is supported\n");
-		if (cppc_state == AMD_PSTATE_PASSIVE)
+		if (cppc_state != AMD_PSTATE_ACTIVE)
 			current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
 	} else {
 		pr_debug("AMD CPPC shared memory based functionality is supported\n");
@@ -1299,24 +1409,19 @@ static int __init amd_pstate_init(void)
 	if (ret)
 		pr_err("failed to register with return %d\n", ret);
 
-	amd_pstate_kobj = kobject_create_and_add("amd_pstate", &cpu_subsys.dev_root->kobj);
-	if (!amd_pstate_kobj) {
-		ret = -EINVAL;
-		pr_err("global sysfs registration failed.\n");
-		goto kobject_free;
-	}
-
-	ret = sysfs_create_group(amd_pstate_kobj, &amd_pstate_global_attr_group);
-	if (ret) {
-		pr_err("sysfs attribute export failed with error %d.\n", ret);
-		goto global_attr_free;
+	dev_root = bus_get_dev_root(&cpu_subsys);
+	if (dev_root) {
+		ret = sysfs_create_group(&dev_root->kobj, &amd_pstate_global_attr_group);
+		put_device(dev_root);
+		if (ret) {
+			pr_err("sysfs attribute export failed with error %d.\n", ret);
+			goto global_attr_free;
+		}
 	}
 
 	return ret;
 
 global_attr_free:
-	kobject_put(amd_pstate_kobj);
-kobject_free:
 	cpufreq_unregister_driver(current_pstate_driver);
 	return ret;
 }
@@ -1341,7 +1446,7 @@ static int __init amd_pstate_param(char *str)
 		if (cppc_state == AMD_PSTATE_ACTIVE)
 			current_pstate_driver = &amd_pstate_epp_driver;
 
-		if (cppc_state == AMD_PSTATE_PASSIVE)
+		if (cppc_state == AMD_PSTATE_PASSIVE || cppc_state == AMD_PSTATE_GUIDED)
 			current_pstate_driver = &amd_pstate_driver;
 
 		return 0;
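Note on the mode switch rework above: the sysfs status handler no longer walks a switch statement; it dispatches through mode_state_machine[cppc_state][mode_idx], a table of transition callbacks indexed by the current and the requested driver mode. The standalone C sketch below illustrates the same table-driven dispatch pattern; the demo_* names, enum values and printed messages are illustrative stand-ins, not driver code.

/* Illustrative userspace sketch of a table-driven mode state machine.
 * All demo_* identifiers are hypothetical, not part of amd-pstate. */
#include <stdio.h>

enum demo_mode { DEMO_DISABLE, DEMO_PASSIVE, DEMO_ACTIVE, DEMO_GUIDED, DEMO_MAX };

typedef int (*demo_transition_fn)(int);

static int demo_register(int mode)      { printf("register driver for mode %d\n", mode); return 0; }
static int demo_unregister(int mode)    { (void)mode; printf("unregister driver\n"); return 0; }
static int demo_switch_driver(int mode) { printf("re-register driver for mode %d\n", mode); return 0; }
static int demo_flip_auto_sel(int mode) { printf("toggle autonomous selection for mode %d\n", mode); return 0; }

/* One callback per (current mode, requested mode) pair; NULL means "nothing to do". */
static demo_transition_fn demo_state_machine[DEMO_MAX][DEMO_MAX] = {
	[DEMO_DISABLE] = {
		[DEMO_PASSIVE] = demo_register,
		[DEMO_ACTIVE]  = demo_register,
		[DEMO_GUIDED]  = demo_register,
	},
	[DEMO_PASSIVE] = {
		[DEMO_DISABLE] = demo_unregister,
		[DEMO_ACTIVE]  = demo_switch_driver,
		[DEMO_GUIDED]  = demo_flip_auto_sel,
	},
	[DEMO_ACTIVE] = {
		[DEMO_DISABLE] = demo_unregister,
		[DEMO_PASSIVE] = demo_switch_driver,
		[DEMO_GUIDED]  = demo_switch_driver,
	},
	[DEMO_GUIDED] = {
		[DEMO_DISABLE] = demo_unregister,
		[DEMO_PASSIVE] = demo_flip_auto_sel,
		[DEMO_ACTIVE]  = demo_switch_driver,
	},
};

/* Mirrors the shape of amd_pstate_update_status(): validate, look up, dispatch. */
static int demo_update_status(int current_mode, int requested_mode)
{
	if (requested_mode < 0 || requested_mode >= DEMO_MAX)
		return -1;
	if (demo_state_machine[current_mode][requested_mode])
		return demo_state_machine[current_mode][requested_mode](requested_mode);
	return 0; /* no-op transition, e.g. passive -> passive */
}

int main(void)
{
	demo_update_status(DEMO_DISABLE, DEMO_PASSIVE); /* register */
	demo_update_status(DEMO_PASSIVE, DEMO_GUIDED);  /* auto_sel toggle only */
	demo_update_status(DEMO_GUIDED, DEMO_ACTIVE);   /* full driver switch */
	return 0;
}

As in the patch, transitions between passive and guided only toggle the CPPC autonomous-selection behaviour and keep the registered cpufreq driver, while transitions into or out of active (EPP) mode re-register the driver.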
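On the guided-mode request handling: with a dynamic-switching governor, amd_pstate_update() folds the governor's request into the minimum performance level and writes a desired performance of zero, leaving the platform to pick an operating point between min_perf and max_perf. The minimal userspace sketch below shows that adjustment; the SKETCH_*_PERF bit layout is assumed to follow the driver's AMD_CPPC_*_PERF helpers, and build_cppc_req() is a hypothetical helper, not a driver function.

/* Userspace sketch of building a CPPC-request-style value, with the
 * guided-mode tweak applied. Bit layout and helper names are assumptions
 * of this sketch, not driver code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_MAX_PERF(x)  (((uint64_t)(x) & 0xff) << 0)
#define SKETCH_MIN_PERF(x)  (((uint64_t)(x) & 0xff) << 8)
#define SKETCH_DES_PERF(x)  (((uint64_t)(x) & 0xff) << 16)

static uint64_t build_cppc_req(uint32_t min_perf, uint32_t des_perf,
			       uint32_t max_perf, bool guided_dynamic_gov)
{
	/* Clamp the desired level into [min_perf, max_perf], as the driver does. */
	if (des_perf < min_perf)
		des_perf = min_perf;
	if (des_perf > max_perf)
		des_perf = max_perf;

	/*
	 * Guided mode with a dynamic-switching governor: the request becomes
	 * the floor (min_perf) and des_perf stays 0, so the hardware chooses
	 * the operating point within [min_perf, max_perf] on its own.
	 */
	if (guided_dynamic_gov) {
		min_perf = des_perf;
		des_perf = 0;
	}

	return SKETCH_MIN_PERF(min_perf) | SKETCH_DES_PERF(des_perf) |
	       SKETCH_MAX_PERF(max_perf);
}

int main(void)
{
	printf("passive req: 0x%llx\n",
	       (unsigned long long)build_cppc_req(50, 120, 255, false));
	printf("guided  req: 0x%llx\n",
	       (unsigned long long)build_cppc_req(50, 120, 255, true));
	return 0;
}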