Diffstat (limited to 'arch/x86/kernel/smpboot.c')
-rw-r--r-- | arch/x86/kernel/smpboot.c | 72
1 file changed, 2 insertions, 70 deletions
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 617012f4619f..2ef14772dc04 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -83,10 +83,6 @@
 #include <asm/hw_irq.h>
 #include <asm/stackprotector.h>
 
-#ifdef CONFIG_ACPI_CPPC_LIB
-#include <acpi/cppc_acpi.h>
-#endif
-
 /* representing HT siblings of each logical CPU */
 DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
@@ -155,8 +151,6 @@ static inline void smpboot_restore_warm_reset_vector(void)
 	*((volatile u32 *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
 }
 
-static void init_freq_invariance(bool secondary, bool cppc_ready);
-
 /*
  * Report back to the Boot Processor during boot time or to the caller processor
  * during CPU online.
@@ -2097,48 +2091,6 @@ out:
 	return true;
 }
 
-#ifdef CONFIG_ACPI_CPPC_LIB
-static bool amd_set_max_freq_ratio(void)
-{
-	struct cppc_perf_caps perf_caps;
-	u64 highest_perf, nominal_perf;
-	u64 perf_ratio;
-	int rc;
-
-	rc = cppc_get_perf_caps(0, &perf_caps);
-	if (rc) {
-		pr_debug("Could not retrieve perf counters (%d)\n", rc);
-		return false;
-	}
-
-	highest_perf = amd_get_highest_perf();
-	nominal_perf = perf_caps.nominal_perf;
-
-	if (!highest_perf || !nominal_perf) {
-		pr_debug("Could not retrieve highest or nominal performance\n");
-		return false;
-	}
-
-	perf_ratio = div_u64(highest_perf * SCHED_CAPACITY_SCALE, nominal_perf);
-	/* midpoint between max_boost and max_P */
-	perf_ratio = (perf_ratio + SCHED_CAPACITY_SCALE) >> 1;
-	if (!perf_ratio) {
-		pr_debug("Non-zero highest/nominal perf values led to a 0 ratio\n");
-		return false;
-	}
-
-	arch_turbo_freq_ratio = perf_ratio;
-	arch_set_max_freq_ratio(false);
-
-	return true;
-}
-#else
-static bool amd_set_max_freq_ratio(void)
-{
-	return false;
-}
-#endif
-
 static void init_counter_refs(void)
 {
 	u64 aperf, mperf;
@@ -2167,7 +2119,7 @@ static void register_freq_invariance_syscore_ops(void)
 static inline void register_freq_invariance_syscore_ops(void) {}
 #endif
 
-static void init_freq_invariance(bool secondary, bool cppc_ready)
+void init_freq_invariance(bool secondary, bool cppc_ready)
 {
 	bool ret = false;
 
@@ -2187,7 +2139,7 @@ static void init_freq_invariance(bool secondary, bool cppc_ready)
 		if (!cppc_ready) {
 			return;
 		}
-		ret = amd_set_max_freq_ratio();
+		ret = amd_set_max_freq_ratio(&arch_turbo_freq_ratio);
 	}
 
 	if (ret) {
@@ -2200,22 +2152,6 @@ static void init_freq_invariance(bool secondary, bool cppc_ready)
 	}
 }
 
-#ifdef CONFIG_ACPI_CPPC_LIB
-static DEFINE_MUTEX(freq_invariance_lock);
-
-void init_freq_invariance_cppc(void)
-{
-	static bool secondary;
-
-	mutex_lock(&freq_invariance_lock);
-
-	init_freq_invariance(secondary, true);
-	secondary = true;
-
-	mutex_unlock(&freq_invariance_lock);
-}
-#endif
-
 static void disable_freq_invariance_workfn(struct work_struct *work)
 {
 	static_branch_disable(&arch_scale_freq_key);
@@ -2264,8 +2200,4 @@ error:
 	pr_warn("Scheduler frequency invariance went wobbly, disabling!\n");
 	schedule_work(&disable_freq_invariance_work);
 }
-#else
-static inline void init_freq_invariance(bool secondary, bool cppc_ready)
-{
-}
 #endif /* CONFIG_X86_64 */
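
Note: the changed call site, amd_set_max_freq_ratio(&arch_turbo_freq_ratio), implies that the CPPC-based helper removed from smpboot.c now lives elsewhere and returns the ratio through an out-parameter. The snippet below is only a minimal sketch of what that relocated helper could look like if it keeps the removed midpoint calculation unchanged; the destination file, its final form, and who calls arch_set_max_freq_ratio() afterwards are not shown in this diff.

/*
 * Hypothetical sketch, not the relocated code itself: same CPPC lookup
 * and midpoint math as the removed smpboot.c copy, but writing the
 * result through the pointer passed by init_freq_invariance().
 */
#ifdef CONFIG_ACPI_CPPC_LIB
bool amd_set_max_freq_ratio(u64 *ratio)
{
	struct cppc_perf_caps perf_caps;
	u64 highest_perf, nominal_perf;
	u64 perf_ratio;
	int rc;

	if (!ratio)
		return false;

	rc = cppc_get_perf_caps(0, &perf_caps);
	if (rc) {
		pr_debug("Could not retrieve perf counters (%d)\n", rc);
		return false;
	}

	highest_perf = amd_get_highest_perf();
	nominal_perf = perf_caps.nominal_perf;

	if (!highest_perf || !nominal_perf) {
		pr_debug("Could not retrieve highest or nominal performance\n");
		return false;
	}

	/* Midpoint between max_boost and max_P, as in the removed copy. */
	perf_ratio = div_u64(highest_perf * SCHED_CAPACITY_SCALE, nominal_perf);
	perf_ratio = (perf_ratio + SCHED_CAPACITY_SCALE) >> 1;
	if (!perf_ratio) {
		pr_debug("Non-zero highest/nominal perf values led to a 0 ratio\n");
		return false;
	}

	*ratio = perf_ratio;
	return true;
}
#endif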