Diffstat (limited to 'arch/x86/kernel/tsc_sync.c')
-rw-r--r--	arch/x86/kernel/tsc_sync.c	41
1 file changed, 41 insertions, 0 deletions
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 50a4515fe0ad..9452dc9664b5 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -30,6 +30,7 @@ struct tsc_adjust {
 };
 
 static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);
+static struct timer_list tsc_sync_check_timer;
 
 /*
  * TSC's on different sockets may be reset asynchronously.
@@ -77,6 +78,46 @@ void tsc_verify_tsc_adjust(bool resume)
 	}
 }
 
+/*
+ * Normally the tsc_sync will be checked every time system enters idle
+ * state, but there is still caveat that a system won't enter idle,
+ * either because it's too busy or configured purposely to not enter
+ * idle.
+ *
+ * So setup a periodic timer (every 10 minutes) to make sure the check
+ * is always on.
+ */
+
+#define SYNC_CHECK_INTERVAL		(HZ * 600)
+
+static void tsc_sync_check_timer_fn(struct timer_list *unused)
+{
+	int next_cpu;
+
+	tsc_verify_tsc_adjust(false);
+
+	/* Run the check for all onlined CPUs in turn */
+	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
+	if (next_cpu >= nr_cpu_ids)
+		next_cpu = cpumask_first(cpu_online_mask);
+
+	tsc_sync_check_timer.expires += SYNC_CHECK_INTERVAL;
+	add_timer_on(&tsc_sync_check_timer, next_cpu);
+}
+
+static int __init start_sync_check_timer(void)
+{
+	if (!cpu_feature_enabled(X86_FEATURE_TSC_ADJUST) || tsc_clocksource_reliable)
+		return 0;
+
+	timer_setup(&tsc_sync_check_timer, tsc_sync_check_timer_fn, 0);
+	tsc_sync_check_timer.expires = jiffies + SYNC_CHECK_INTERVAL;
+	add_timer(&tsc_sync_check_timer);
+
+	return 0;
+}
+late_initcall(start_sync_check_timer);
+
 static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval,
 				   unsigned int cpu, bool bootcpu)
 {
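
For reference, the core pattern the patch relies on is a self-rearming timer that migrates across online CPUs: each expiry does the check on the current CPU, picks the next CPU from cpu_online_mask (wrapping around with cpumask_first()), and re-arms itself there with add_timer_on(), so over time every online CPU runs the check without pinning a timer per CPU. Below is a minimal, hypothetical standalone sketch of that pattern as a loadable module; the names (check_timer, check_timer_fn, CHECK_INTERVAL) are illustrative and not part of the patch, and the placeholder pr_debug() stands in for the real work (tsc_verify_tsc_adjust(false) in the patch above).

/* Sketch only: self-rearming timer rotating over online CPUs */
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/cpumask.h>
#include <linux/jiffies.h>
#include <linux/smp.h>

#define CHECK_INTERVAL	(HZ * 600)	/* 10 minutes, same interval as the patch */

static struct timer_list check_timer;

static void check_timer_fn(struct timer_list *unused)
{
	int next_cpu;

	/* Placeholder for the per-CPU work (the patch calls tsc_verify_tsc_adjust(false)) */
	pr_debug("periodic check ran on CPU %d\n", raw_smp_processor_id());

	/* Pick the next online CPU, wrapping to the first one at the end of the mask */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);

	/* Re-arm on that CPU so every online CPU gets a turn */
	check_timer.expires += CHECK_INTERVAL;
	add_timer_on(&check_timer, next_cpu);
}

static int __init check_timer_init(void)
{
	timer_setup(&check_timer, check_timer_fn, 0);
	check_timer.expires = jiffies + CHECK_INTERVAL;
	add_timer(&check_timer);
	return 0;
}

static void __exit check_timer_exit(void)
{
	del_timer_sync(&check_timer);
}

module_init(check_timer_init);
module_exit(check_timer_exit);
MODULE_LICENSE("GPL");

Note the design choice mirrored from the patch: rather than one periodic timer per CPU, a single timer is bounced around the online mask with add_timer_on(), which keeps overhead to one pending timer while still exercising the check on every CPU in turn. The patch also skips arming the timer entirely when X86_FEATURE_TSC_ADJUST is absent or the TSC clocksource is marked reliable, since the check would be pointless there.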