diff options
Diffstat (limited to 'arch/x86/kernel/tsc.c')
| -rw-r--r-- | arch/x86/kernel/tsc.c | 55 | 
1 file changed, 29 insertions(+), 26 deletions(-)
| diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 7437b41f6a47..c3f7602cd038 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -21,6 +21,7 @@  #include <asm/hypervisor.h>  #include <asm/nmi.h>  #include <asm/x86_init.h> +#include <asm/geode.h>  unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */  EXPORT_SYMBOL(cpu_khz); @@ -38,7 +39,7 @@ static int __read_mostly tsc_unstable;     erroneous rdtsc usage on !cpu_has_tsc processors */  static int __read_mostly tsc_disabled = -1; -static struct static_key __use_tsc = STATIC_KEY_INIT; +static DEFINE_STATIC_KEY_FALSE(__use_tsc);  int tsc_clocksource_reliable; @@ -248,7 +249,7 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)  	data = cyc2ns_write_begin(cpu); -	rdtscll(tsc_now); +	tsc_now = rdtsc();  	ns_now = cycles_2_ns(tsc_now);  	/* @@ -274,7 +275,12 @@ done:   */  u64 native_sched_clock(void)  { -	u64 tsc_now; +	if (static_branch_likely(&__use_tsc)) { +		u64 tsc_now = rdtsc(); + +		/* return the value in ns */ +		return cycles_2_ns(tsc_now); +	}  	/*  	 * Fall back to jiffies if there's no TSC available: @@ -284,16 +290,17 @@ u64 native_sched_clock(void)  	 *   very important for it to be as fast as the platform  	 *   can achieve it. )  	 */ -	if (!static_key_false(&__use_tsc)) { -		/* No locking but a rare wrong value is not a big deal: */ -		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ); -	} -	/* read the Time Stamp Counter: */ -	rdtscll(tsc_now); +	/* No locking but a rare wrong value is not a big deal: */ +	return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ); +} -	/* return the value in ns */ -	return cycles_2_ns(tsc_now); +/* + * Generate a sched_clock if you already have a TSC value. 
+ */ +u64 native_sched_clock_from_tsc(u64 tsc) +{ +	return cycles_2_ns(tsc);  }  /* We need to define a real function for sched_clock, to override the @@ -308,12 +315,6 @@ unsigned long long  sched_clock(void) __attribute__((alias("native_sched_clock")));  #endif -unsigned long long native_read_tsc(void) -{ -	return __native_read_tsc(); -} -EXPORT_SYMBOL(native_read_tsc); -  int check_tsc_unstable(void)  {  	return tsc_unstable; @@ -976,7 +977,7 @@ static struct clocksource clocksource_tsc;   */  static cycle_t read_tsc(struct clocksource *cs)  { -	return (cycle_t)get_cycles(); +	return (cycle_t)rdtsc_ordered();  }  /* @@ -1013,15 +1014,17 @@ EXPORT_SYMBOL_GPL(mark_tsc_unstable);  static void __init check_system_tsc_reliable(void)  { -#ifdef CONFIG_MGEODE_LX -	/* RTSC counts during suspend */ +#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC) +	if (is_geode_lx()) { +		/* RTSC counts during suspend */  #define RTSC_SUSP 0x100 -	unsigned long res_low, res_high; +		unsigned long res_low, res_high; -	rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high); -	/* Geode_LX - the OLPC CPU has a very reliable TSC */ -	if (res_low & RTSC_SUSP) -		tsc_clocksource_reliable = 1; +		rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high); +		/* Geode_LX - the OLPC CPU has a very reliable TSC */ +		if (res_low & RTSC_SUSP) +			tsc_clocksource_reliable = 1; +	}  #endif  	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))  		tsc_clocksource_reliable = 1; @@ -1210,7 +1213,7 @@ void __init tsc_init(void)  	/* now allow native_sched_clock() to use rdtsc */  	tsc_disabled = 0; -	static_key_slow_inc(&__use_tsc); +	static_branch_enable(&__use_tsc);  	if (!no_sched_irq_time)  		enable_sched_clock_irqtime(); |