Diffstat (limited to 'kernel/time/timekeeping.c')
 -rw-r--r--   kernel/time/timekeeping.c | 96
 1 file changed, 49 insertions(+), 47 deletions(-)
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index b58dffc58a8f..4e18db1819f8 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -237,7 +237,9 @@ static void timekeeping_check_update(struct timekeeper *tk, u64 offset)
 	}
 }

-static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
+static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles);
+
+static inline u64 timekeeping_debug_get_ns(const struct tk_read_base *tkr)
 {
 	struct timekeeper *tk = &tk_core.timekeeper;
 	u64 now, last, mask, max, delta;
@@ -264,34 +266,23 @@ static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
 	 * Try to catch underflows by checking if we are seeing small
 	 * mask-relative negative values.
 	 */
-	if (unlikely((~delta & mask) < (mask >> 3))) {
+	if (unlikely((~delta & mask) < (mask >> 3)))
 		tk->underflow_seen = 1;
-		delta = 0;
-	}

-	/* Cap delta value to the max_cycles values to avoid mult overflows */
-	if (unlikely(delta > max)) {
+	/* Check for multiplication overflows */
+	if (unlikely(delta > max))
 		tk->overflow_seen = 1;
-		delta = tkr->clock->max_cycles;
-	}

-	return delta;
+	/* timekeeping_cycles_to_ns() handles both under and overflow */
+	return timekeeping_cycles_to_ns(tkr, now);
 }
 #else
 static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset)
 {
 }
-static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
+static inline u64 timekeeping_debug_get_ns(const struct tk_read_base *tkr)
 {
-	u64 cycle_now, delta;
-
-	/* read clocksource */
-	cycle_now = tk_clock_read(tkr);
-
-	/* calculate the delta since the last update_wall_time */
-	delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
-
-	return delta;
+	BUG();
 }
 #endif

@@ -370,32 +361,46 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
 }

 /* Timekeeper helper functions. */
+static noinline u64 delta_to_ns_safe(const struct tk_read_base *tkr, u64 delta)
+{
+	return mul_u64_u32_add_u64_shr(delta, tkr->mult, tkr->xtime_nsec, tkr->shift);
+}

-static inline u64 timekeeping_delta_to_ns(const struct tk_read_base *tkr, u64 delta)
+static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles)
 {
-	u64 nsec;
+	/* Calculate the delta since the last update_wall_time() */
+	u64 mask = tkr->mask, delta = (cycles - tkr->cycle_last) & mask;

-	nsec = delta * tkr->mult + tkr->xtime_nsec;
-	nsec >>= tkr->shift;
+	/*
+	 * This detects both negative motion and the case where the delta
+	 * overflows the multiplication with tkr->mult.
+	 */
+	if (unlikely(delta > tkr->clock->max_cycles)) {
+		/*
+		 * Handle clocksource inconsistency between CPUs to prevent
+		 * time from going backwards by checking for the MSB of the
+		 * mask being set in the delta.
+		 */
+		if (delta & ~(mask >> 1))
+			return tkr->xtime_nsec >> tkr->shift;
+
+		return delta_to_ns_safe(tkr, delta);
+	}

-	return nsec;
+	return ((delta * tkr->mult) + tkr->xtime_nsec) >> tkr->shift;
 }

-static inline u64 timekeeping_get_ns(const struct tk_read_base *tkr)
+static __always_inline u64 __timekeeping_get_ns(const struct tk_read_base *tkr)
 {
-	u64 delta;
-
-	delta = timekeeping_get_delta(tkr);
-	return timekeeping_delta_to_ns(tkr, delta);
+	return timekeeping_cycles_to_ns(tkr, tk_clock_read(tkr));
 }

-static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles)
+static inline u64 timekeeping_get_ns(const struct tk_read_base *tkr)
 {
-	u64 delta;
+	if (IS_ENABLED(CONFIG_DEBUG_TIMEKEEPING))
+		return timekeeping_debug_get_ns(tkr);

-	/* calculate the delta since the last update_wall_time */
-	delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask);
-	return timekeeping_delta_to_ns(tkr, delta);
+	return __timekeeping_get_ns(tkr);
 }

 /**
@@ -431,14 +436,6 @@ static void update_fast_timekeeper(const struct tk_read_base *tkr,
 	memcpy(base + 1, base, sizeof(*base));
 }

-static __always_inline u64 fast_tk_get_delta_ns(struct tk_read_base *tkr)
-{
-	u64 delta, cycles = tk_clock_read(tkr);
-
-	delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask);
-	return timekeeping_delta_to_ns(tkr, delta);
-}
-
 static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
 {
 	struct tk_read_base *tkr;
@@ -449,7 +446,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
 		seq = raw_read_seqcount_latch(&tkf->seq);
 		tkr = tkf->base + (seq & 0x01);
 		now = ktime_to_ns(tkr->base);
-		now += fast_tk_get_delta_ns(tkr);
+		now += __timekeeping_get_ns(tkr);
 	} while (raw_read_seqcount_latch_retry(&tkf->seq, seq));

 	return now;
@@ -565,7 +562,7 @@ static __always_inline u64 __ktime_get_real_fast(struct tk_fast *tkf, u64 *mono)
 		tkr = tkf->base + (seq & 0x01);
 		basem = ktime_to_ns(tkr->base);
 		baser = ktime_to_ns(tkr->base_real);
-		delta = fast_tk_get_delta_ns(tkr);
+		delta = __timekeeping_get_ns(tkr);
 	} while (raw_read_seqcount_latch_retry(&tkf->seq, seq));

 	if (mono)
@@ -800,10 +797,15 @@ static void timekeeping_forward_now(struct timekeeper *tk)
 	tk->tkr_mono.cycle_last = cycle_now;
 	tk->tkr_raw.cycle_last  = cycle_now;

-	tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;
-	tk->tkr_raw.xtime_nsec += delta * tk->tkr_raw.mult;
+	while (delta > 0) {
+		u64 max = tk->tkr_mono.clock->max_cycles;
+		u64 incr = delta < max ? delta : max;

-	tk_normalize_xtime(tk);
+		tk->tkr_mono.xtime_nsec += incr * tk->tkr_mono.mult;
+		tk->tkr_raw.xtime_nsec += incr * tk->tkr_raw.mult;
+		tk_normalize_xtime(tk);
+		delta -= incr;
+	}
 }

 /**
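
The centerpiece of this change is the new timekeeping_cycles_to_ns(): the common case keeps the cheap ((delta * mult) + xtime_nsec) >> shift conversion, and only when delta exceeds max_cycles does delta_to_ns_safe() fall back to mul_u64_u32_add_u64_shr(), which widens the multiplication to 128 bits so the product cannot wrap. Below is a minimal userspace sketch of that fallback, not kernel code: it models mul_u64_u32_add_u64_shr() with unsigned __int128 (assuming GCC or Clang on a 64-bit target), and the mult/shift/xtime_nsec values are invented for illustration.

#include <stdint.h>
#include <stdio.h>

/*
 * Userspace model of the kernel's mul_u64_u32_add_u64_shr():
 * computes (a * mul + b) >> shift via a 128-bit intermediate so the
 * product cannot wrap (assumes a compiler providing __int128).
 */
static uint64_t mul_u64_u32_add_u64_shr(uint64_t a, uint32_t mul,
					uint64_t b, unsigned int shift)
{
	return (uint64_t)(((unsigned __int128)a * mul + b) >> shift);
}

int main(void)
{
	/* Illustrative values, not taken from the patch: ~1 ns/cycle. */
	uint32_t mult = 4194304;		/* 2^22 */
	unsigned int shift = 22;
	uint64_t xtime_nsec = 123456789;	/* shifted remainder ns */

	/* A delta big enough that delta * mult wraps 64-bit arithmetic. */
	uint64_t delta = 1ULL << 45;

	uint64_t fast = ((delta * mult) + xtime_nsec) >> shift;
	uint64_t safe = mul_u64_u32_add_u64_shr(delta, mult, xtime_nsec, shift);

	/* fast wraps to 29 ns; safe yields ~2^45 ns as expected. */
	printf("fast: %llu ns\nsafe: %llu ns\n",
	       (unsigned long long)fast, (unsigned long long)safe);
	return 0;
}

Keeping the widened multiply out of line (noinline delta_to_ns_safe) matches the patch's intent: the 128-bit path is only taken for oversized deltas, so the hot path stays a single multiply, add, and shift.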
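The timekeeping_forward_now() hunk applies the same overflow reasoning to accumulation: instead of one potentially wrapping delta * mult product, it consumes the delta in chunks of at most max_cycles, normalizing accumulated nanoseconds after each step. A standalone model of that loop follows; the struct and all values are hypothetical stand-ins for the real timekeeper, chosen so that incr * mult always fits in 64 bits.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Hypothetical miniature of the timekeeper fields the loop touches. */
struct tk_model {
	uint64_t xtime_sec;
	uint64_t xtime_nsec;	/* nanoseconds left-shifted by 'shift' */
	uint32_t mult;
	uint32_t shift;
	uint64_t max_cycles;	/* largest delta whose product fits in 64 bits */
};

/* Counterpart of tk_normalize_xtime(): move whole seconds out. */
static void tk_normalize(struct tk_model *tk)
{
	while (tk->xtime_nsec >= (NSEC_PER_SEC << tk->shift)) {
		tk->xtime_nsec -= NSEC_PER_SEC << tk->shift;
		tk->xtime_sec++;
	}
}

/* The chunked accumulation pattern from timekeeping_forward_now(). */
static void forward_now(struct tk_model *tk, uint64_t delta)
{
	while (delta > 0) {
		uint64_t incr = delta < tk->max_cycles ? delta : tk->max_cycles;

		/* incr <= max_cycles, so incr * mult cannot wrap. */
		tk->xtime_nsec += incr * tk->mult;
		tk_normalize(tk);
		delta -= incr;
	}
}

int main(void)
{
	struct tk_model tk = {
		.mult = 4194304, .shift = 22,	/* ~1 ns per cycle */
		.max_cycles = 1ULL << 41,	/* 2^41 * 2^22 = 2^63 < 2^64 */
	};

	forward_now(&tk, 1ULL << 45);	/* would wrap as a single product */
	printf("forwarded %llu s + %llu ns\n",
	       (unsigned long long)tk.xtime_sec,
	       (unsigned long long)(tk.xtime_nsec >> tk.shift));
	return 0;
}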