Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/alarmtimer.c          9
-rw-r--r--  kernel/time/clockevents.c         2
-rw-r--r--  kernel/time/clocksource.c        47
-rw-r--r--  kernel/time/hrtimer.c            24
-rw-r--r--  kernel/time/ntp.c                19
-rw-r--r--  kernel/time/ntp_internal.h        4
-rw-r--r--  kernel/time/posix-cpu-timers.c  207
-rw-r--r--  kernel/time/posix-timers.c       73
-rw-r--r--  kernel/time/posix-timers.h        3
-rw-r--r--  kernel/time/tick-broadcast.c      3
-rw-r--r--  kernel/time/timekeeping.c         6
-rw-r--r--  kernel/time/timer.c              66
-rw-r--r--  kernel/time/timer_migration.c   393
-rw-r--r--  kernel/time/timer_migration.h    27
14 files changed, 460 insertions, 423 deletions
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 5abfa4390673..8bf888641694 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -493,7 +493,7 @@ static u64 __alarm_forward_now(struct alarm *alarm, ktime_t interval, bool throt
* promised in the context of posix_timer_fn() never
* materialized, but someone should really work on it.
*
- * To prevent DOS fake @now to be 1 jiffie out which keeps
+	 * To prevent DoS, fake @now to be 1 jiffy out, which keeps
* the overrun accounting correct but creates an
* inconsistency vs. timer_gettime(2).
*/
@@ -574,15 +574,10 @@ static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
it.alarm.alarmtimer);
enum alarmtimer_restart result = ALARMTIMER_NORESTART;
unsigned long flags;
- int si_private = 0;
spin_lock_irqsave(&ptr->it_lock, flags);
- ptr->it_active = 0;
- if (ptr->it_interval)
- si_private = ++ptr->it_requeue_pending;
-
- if (posix_timer_event(ptr, si_private) && ptr->it_interval) {
+ if (posix_timer_queue_signal(ptr) && ptr->it_interval) {
/*
* Handle ignored signals and rearm the timer. This will go
* away once we handle ignored signals proper. Ensure that
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 60a6484831b1..78c7bd64d0dd 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -190,7 +190,7 @@ int clockevents_tick_resume(struct clock_event_device *dev)
#ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST
-/* Limit min_delta to a jiffie */
+/* Limit min_delta to a jiffy */
#define MIN_DELTA_LIMIT (NSEC_PER_SEC / HZ)
/**
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index d25ba49e313c..23336eecb4f4 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -113,7 +113,6 @@ static u64 suspend_start;
/*
* Threshold: 0.0312s, when doubled: 0.0625s.
- * Also a default for cs->uncertainty_margin when registering clocks.
*/
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 5)
@@ -125,6 +124,13 @@ static u64 suspend_start;
*
* The default of 500 parts per million is based on NTP's limits.
* If a clocksource is good enough for NTP, it is good enough for us!
+ *
+ * In other words, by default, even if a clocksource is extremely
+ * precise (for example, with a sub-nanosecond period), the maximum
+ * permissible skew between the clocksource watchdog and the clocksource
+ * under test is not permitted to go below the 500ppm minimum defined
+ * by MAX_SKEW_USEC. This 500ppm minimum may be overridden using the
+ * CLOCKSOURCE_WATCHDOG_MAX_SKEW_US Kconfig option.
*/
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US
#define MAX_SKEW_USEC CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US
@@ -132,6 +138,13 @@ static u64 suspend_start;
#define MAX_SKEW_USEC (125 * WATCHDOG_INTERVAL / HZ)
#endif
+/*
+ * Default for maximum permissible skew when cs->uncertainty_margin is
+ * not specified, and the lower bound even when cs->uncertainty_margin
+ * is specified. This is also the default that is used when registering
+ * clocks with unspecified cs->uncertainty_margin, so this macro is used
+ * even in CONFIG_CLOCKSOURCE_WATCHDOG=n kernels.
+ */
#define WATCHDOG_MAX_SKEW (MAX_SKEW_USEC * NSEC_PER_USEC)
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
@@ -231,6 +244,7 @@ enum wd_read_status {
static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow)
{
+ int64_t md = 2 * watchdog->uncertainty_margin;
unsigned int nretries, max_retries;
int64_t wd_delay, wd_seq_delay;
u64 wd_end, wd_end2;
@@ -245,8 +259,8 @@ static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow,
local_irq_enable();
wd_delay = cycles_to_nsec_safe(watchdog, *wdnow, wd_end);
- if (wd_delay <= WATCHDOG_MAX_SKEW) {
- if (nretries > 1 || nretries >= max_retries) {
+ if (wd_delay <= md + cs->uncertainty_margin) {
+ if (nretries > 1 && nretries >= max_retries) {
pr_warn("timekeeping watchdog on CPU%d: %s retried %d times before success\n",
smp_processor_id(), watchdog->name, nretries);
}
@@ -258,12 +272,12 @@ static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow,
 * there is too much external interference that causes
* significant delay in reading both clocksource and watchdog.
*
- * If consecutive WD read-back delay > WATCHDOG_MAX_SKEW/2,
- * report system busy, reinit the watchdog and skip the current
+ * If consecutive WD read-back delay > md, report
+ * system busy, reinit the watchdog and skip the current
* watchdog test.
*/
wd_seq_delay = cycles_to_nsec_safe(watchdog, wd_end, wd_end2);
- if (wd_seq_delay > WATCHDOG_MAX_SKEW/2)
+ if (wd_seq_delay > md)
goto skip_test;
}
@@ -1146,14 +1160,19 @@ void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq
}
/*
- * If the uncertainty margin is not specified, calculate it.
- * If both scale and freq are non-zero, calculate the clock
- * period, but bound below at 2*WATCHDOG_MAX_SKEW. However,
- * if either of scale or freq is zero, be very conservative and
- * take the tens-of-milliseconds WATCHDOG_THRESHOLD value for the
- * uncertainty margin. Allow stupidly small uncertainty margins
- * to be specified by the caller for testing purposes, but warn
- * to discourage production use of this capability.
+ * If the uncertainty margin is not specified, calculate it. If
+ * both scale and freq are non-zero, calculate the clock period, but
+ * bound below at 2*WATCHDOG_MAX_SKEW, that is, 500ppm by default.
+ * However, if either of scale or freq is zero, be very conservative
+ * and take the tens-of-milliseconds WATCHDOG_THRESHOLD value
+ * for the uncertainty margin. Allow stupidly small uncertainty
+ * margins to be specified by the caller for testing purposes,
+ * but warn to discourage production use of this capability.
+ *
+ * Bottom line: The sum of the uncertainty margins of the
+ * watchdog clocksource and the clocksource under test will be at
+ * least 500ppm by default. For more information, please see the
+ * comment preceding CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US above.
*/
if (scale && freq && !cs->uncertainty_margin) {
cs->uncertainty_margin = NSEC_PER_SEC / (scale * freq);
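To make the calculation above concrete, here is a minimal standalone sketch (userspace C, not kernel code) of deriving the default uncertainty margin from scale and freq and bounding it below at 2*WATCHDOG_MAX_SKEW, as the updated comment describes. MAX_SKEW_USEC below is an illustrative stand-in value; the kernel derives it from WATCHDOG_INTERVAL/HZ or the Kconfig option.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL
#define NSEC_PER_USEC	1000ULL
#define MAX_SKEW_USEC	100ULL		/* illustrative stand-in value */
#define WATCHDOG_MAX_SKEW (MAX_SKEW_USEC * NSEC_PER_USEC)

static uint64_t default_uncertainty_margin(uint64_t scale, uint64_t freq)
{
	/* Clock period in nanoseconds ... */
	uint64_t margin = NSEC_PER_SEC / (scale * freq);

	/* ... bounded below at 2*WATCHDOG_MAX_SKEW (500ppm by default) */
	if (margin < 2 * WATCHDOG_MAX_SKEW)
		margin = 2 * WATCHDOG_MAX_SKEW;

	return margin;
}

int main(void)
{
	/* A 1GHz clocksource has a 1ns period, so the lower bound applies */
	printf("margin: %llu ns\n",
	       (unsigned long long)default_uncertainty_margin(1, 1000000000ULL));
	return 0;
}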
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index b8ee320208d4..12eb40d6290e 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1177,7 +1177,7 @@ static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
/*
* CONFIG_TIME_LOW_RES indicates that the system has no way to return
* granular time values. For relative timers we add hrtimer_resolution
- * (i.e. one jiffie) to prevent short timeouts.
+ * (i.e. one jiffy) to prevent short timeouts.
*/
timer->is_rel = mode & HRTIMER_MODE_REL;
if (timer->is_rel)
@@ -1351,11 +1351,13 @@ static void hrtimer_cpu_base_init_expiry_lock(struct hrtimer_cpu_base *base)
}
static void hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base)
+ __acquires(&base->softirq_expiry_lock)
{
spin_lock(&base->softirq_expiry_lock);
}
static void hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base)
+ __releases(&base->softirq_expiry_lock)
{
spin_unlock(&base->softirq_expiry_lock);
}
@@ -1757,7 +1759,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
}
}
-static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
+static __latent_entropy void hrtimer_run_softirq(void)
{
struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
unsigned long flags;
@@ -2072,14 +2074,9 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
struct restart_block *restart;
struct hrtimer_sleeper t;
int ret = 0;
- u64 slack;
-
- slack = current->timer_slack_ns;
- if (rt_task(current))
- slack = 0;
hrtimer_init_sleeper_on_stack(&t, clockid, mode);
- hrtimer_set_expires_range_ns(&t.timer, rqtp, slack);
+ hrtimer_set_expires_range_ns(&t.timer, rqtp, current->timer_slack_ns);
ret = do_nanosleep(&t, mode);
if (ret != -ERESTART_RESTARTBLOCK)
goto out;
@@ -2249,7 +2246,7 @@ void __init hrtimers_init(void)
/**
* schedule_hrtimeout_range_clock - sleep until timeout
* @expires: timeout value (ktime_t)
- * @delta: slack in expires timeout (ktime_t) for SCHED_OTHER tasks
+ * @delta: slack in expires timeout (ktime_t)
* @mode: timer mode
* @clock_id: timer clock to be used
*/
@@ -2276,13 +2273,6 @@ schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
return -EINTR;
}
- /*
- * Override any slack passed by the user if under
- * rt contraints.
- */
- if (rt_task(current))
- delta = 0;
-
hrtimer_init_sleeper_on_stack(&t, clock_id, mode);
hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
hrtimer_sleeper_start_expires(&t, mode);
@@ -2302,7 +2292,7 @@ EXPORT_SYMBOL_GPL(schedule_hrtimeout_range_clock);
/**
* schedule_hrtimeout_range - sleep until timeout
* @expires: timeout value (ktime_t)
- * @delta: slack in expires timeout (ktime_t) for SCHED_OTHER tasks
+ * @delta: slack in expires timeout (ktime_t)
* @mode: timer mode
*
* Make the current task sleep until the given expiry time has
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 406dccb79c2b..802b336f4b8c 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -660,9 +660,17 @@ rearm:
sched_sync_hw_clock(offset_nsec, res != 0);
}
-void ntp_notify_cmos_timer(void)
+void ntp_notify_cmos_timer(bool offset_set)
{
/*
+	 * If the time jumped (using ADJ_SETOFFSET), cancel the sync timer,
+ * which may have been running if the time was synchronized
+ * prior to the ADJ_SETOFFSET call.
+ */
+ if (offset_set)
+ hrtimer_cancel(&sync_hrtimer);
+
+ /*
	 * When the work is currently executed but has not yet rearmed the
	 * timer, this queues the work immediately again. No big issue,
* just a pointless work scheduled.
@@ -727,17 +735,16 @@ static inline void process_adjtimex_modes(const struct __kernel_timex *txc,
}
if (txc->modes & ADJ_MAXERROR)
- time_maxerror = txc->maxerror;
+ time_maxerror = clamp(txc->maxerror, 0, NTP_PHASE_LIMIT);
if (txc->modes & ADJ_ESTERROR)
- time_esterror = txc->esterror;
+ time_esterror = clamp(txc->esterror, 0, NTP_PHASE_LIMIT);
if (txc->modes & ADJ_TIMECONST) {
- time_constant = txc->constant;
+ time_constant = clamp(txc->constant, 0, MAXTC);
if (!(time_status & STA_NANO))
time_constant += 4;
- time_constant = min(time_constant, (long)MAXTC);
- time_constant = max(time_constant, 0l);
+ time_constant = clamp(time_constant, 0, MAXTC);
}
if (txc->modes & ADJ_TAI &&
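The clamp() conversions above are behavior-preserving. A small self-contained sketch of the equivalence with the open-coded min()/max() pair being removed; the clamp() macro here is a simplified stand-in for the kernel's, and MAXTC is hardcoded for illustration.

#include <assert.h>

/* Simplified stand-in for the kernel's clamp() macro */
#define clamp(val, lo, hi) ((val) < (lo) ? (lo) : ((val) > (hi) ? (hi) : (val)))

#define MAXTC 10	/* illustrative; mirrors the NTP time constant limit */

int main(void)
{
	long constant = 25;

	/* Old form: bound with a min()/max() pair in two steps */
	long old_way = constant > MAXTC ? MAXTC : constant;
	old_way = old_way < 0 ? 0 : old_way;

	/* New form: a single clamp() call */
	long new_way = clamp(constant, 0L, (long)MAXTC);

	assert(old_way == new_way);
	return 0;
}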
diff --git a/kernel/time/ntp_internal.h b/kernel/time/ntp_internal.h
index 23d1b74c3065..5a633dce9057 100644
--- a/kernel/time/ntp_internal.h
+++ b/kernel/time/ntp_internal.h
@@ -14,9 +14,9 @@ extern int __do_adjtimex(struct __kernel_timex *txc,
extern void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts);
#if defined(CONFIG_GENERIC_CMOS_UPDATE) || defined(CONFIG_RTC_SYSTOHC)
-extern void ntp_notify_cmos_timer(void);
+extern void ntp_notify_cmos_timer(bool offset_set);
#else
-static inline void ntp_notify_cmos_timer(void) { }
+static inline void ntp_notify_cmos_timer(bool offset_set) { }
#endif
#endif /* _LINUX_NTP_INTERNAL_H */
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index e9c6f9d0e42c..6bcee4704059 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -453,6 +453,7 @@ static void disarm_timer(struct k_itimer *timer, struct task_struct *p)
struct cpu_timer *ctmr = &timer->it.cpu;
struct posix_cputimer_base *base;
+ timer->it_active = 0;
if (!cpu_timer_dequeue(ctmr))
return;
@@ -559,6 +560,7 @@ static void arm_timer(struct k_itimer *timer, struct task_struct *p)
struct cpu_timer *ctmr = &timer->it.cpu;
u64 newexp = cpu_timer_getexpires(ctmr);
+ timer->it_active = 1;
if (!cpu_timer_enqueue(&base->tqhead, ctmr))
return;
@@ -584,12 +586,8 @@ static void cpu_timer_fire(struct k_itimer *timer)
{
struct cpu_timer *ctmr = &timer->it.cpu;
- if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
- /*
- * User don't want any signal.
- */
- cpu_timer_setexpires(ctmr, 0);
- } else if (unlikely(timer->sigq == NULL)) {
+ timer->it_active = 0;
+ if (unlikely(timer->sigq == NULL)) {
/*
		 * This is a special case for clock_nanosleep,
* not a normal timer from sys_timer_create.
@@ -600,9 +598,9 @@ static void cpu_timer_fire(struct k_itimer *timer)
/*
* One-shot timer. Clear it as soon as it's fired.
*/
- posix_timer_event(timer, 0);
+ posix_timer_queue_signal(timer);
cpu_timer_setexpires(ctmr, 0);
- } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
+ } else if (posix_timer_queue_signal(timer)) {
/*
* The signal did not get queued because the signal
* was ignored, so we won't get any callback to
@@ -614,6 +612,8 @@ static void cpu_timer_fire(struct k_itimer *timer)
}
}
+static void __posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp, u64 now);
+
/*
* Guts of sys_timer_settime for CPU timers.
* This is called with the timer locked and interrupts disabled.
@@ -623,9 +623,10 @@ static void cpu_timer_fire(struct k_itimer *timer)
static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
struct itimerspec64 *new, struct itimerspec64 *old)
{
+ bool sigev_none = timer->it_sigev_notify == SIGEV_NONE;
clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
- u64 old_expires, new_expires, old_incr, val;
struct cpu_timer *ctmr = &timer->it.cpu;
+ u64 old_expires, new_expires, now;
struct sighand_struct *sighand;
struct task_struct *p;
unsigned long flags;
@@ -662,10 +663,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
return -ESRCH;
}
- /*
- * Disarm any old timer after extracting its expiry time.
- */
- old_incr = timer->it_interval;
+ /* Retrieve the current expiry time before disarming the timer */
old_expires = cpu_timer_getexpires(ctmr);
if (unlikely(timer->it.cpu.firing)) {
@@ -673,157 +671,122 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
ret = TIMER_RETRY;
} else {
cpu_timer_dequeue(ctmr);
+ timer->it_active = 0;
}
/*
- * We need to sample the current value to convert the new
- * value from to relative and absolute, and to convert the
- * old value from absolute to relative. To set a process
- * timer, we need a sample to balance the thread expiry
- * times (in arm_timer). With an absolute time, we must
- * check if it's already passed. In short, we need a sample.
+ * Sample the current clock for saving the previous setting
+ * and for rearming the timer.
*/
if (CPUCLOCK_PERTHREAD(timer->it_clock))
- val = cpu_clock_sample(clkid, p);
+ now = cpu_clock_sample(clkid, p);
else
- val = cpu_clock_sample_group(clkid, p, true);
+ now = cpu_clock_sample_group(clkid, p, !sigev_none);
+ /* Retrieve the previous expiry value if requested. */
if (old) {
- if (old_expires == 0) {
- old->it_value.tv_sec = 0;
- old->it_value.tv_nsec = 0;
- } else {
- /*
- * Update the timer in case it has overrun already.
- * If it has, we'll report it as having overrun and
- * with the next reloaded timer already ticking,
- * though we are swallowing that pending
- * notification here to install the new setting.
- */
- u64 exp = bump_cpu_timer(timer, val);
-
- if (val < exp) {
- old_expires = exp - val;
- old->it_value = ns_to_timespec64(old_expires);
- } else {
- old->it_value.tv_nsec = 1;
- old->it_value.tv_sec = 0;
- }
- }
+ old->it_value = (struct timespec64){ };
+ if (old_expires)
+ __posix_cpu_timer_get(timer, old, now);
}
+ /* Retry if the timer expiry is running concurrently */
if (unlikely(ret)) {
- /*
- * We are colliding with the timer actually firing.
- * Punt after filling in the timer's old value, and
- * disable this firing since we are already reporting
- * it as an overrun (thanks to bump_cpu_timer above).
- */
unlock_task_sighand(p, &flags);
goto out;
}
- if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
- new_expires += val;
- }
+ /* Convert relative expiry time to absolute */
+ if (new_expires && !(timer_flags & TIMER_ABSTIME))
+ new_expires += now;
+
+ /* Set the new expiry time (might be 0) */
+ cpu_timer_setexpires(ctmr, new_expires);
/*
- * Install the new expiry time (or zero).
- * For a timer with no notification action, we don't actually
- * arm the timer (we'll just fake it for timer_gettime).
+ * Arm the timer if it is not disabled, the new expiry value has
+ * not yet expired and the timer requires signal delivery.
+ * SIGEV_NONE timers are never armed. In case the timer is not
+ * armed, enforce the reevaluation of the timer base so that the
+ * process wide cputime counter can be disabled eventually.
*/
- cpu_timer_setexpires(ctmr, new_expires);
- if (new_expires != 0 && val < new_expires) {
- arm_timer(timer, p);
+ if (likely(!sigev_none)) {
+ if (new_expires && now < new_expires)
+ arm_timer(timer, p);
+ else
+ trigger_base_recalc_expires(timer, p);
}
unlock_task_sighand(p, &flags);
+
+ posix_timer_set_common(timer, new);
+
/*
- * Install the new reload setting, and
- * set up the signal and overrun bookkeeping.
+ * If the new expiry time was already in the past the timer was not
+ * queued. Fire it immediately even if the thread never runs to
+ * accumulate more time on this clock.
*/
- timer->it_interval = timespec64_to_ktime(new->it_interval);
+ if (!sigev_none && new_expires && now >= new_expires)
+ cpu_timer_fire(timer);
+out:
+ rcu_read_unlock();
+ return ret;
+}
+
+static void __posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp, u64 now)
+{
+ bool sigev_none = timer->it_sigev_notify == SIGEV_NONE;
+ u64 expires, iv = timer->it_interval;
/*
- * This acts as a modification timestamp for the timer,
- * so any automatic reload attempt will punt on seeing
- * that we have reset the timer manually.
+ * Make sure that interval timers are moved forward for the
+ * following cases:
+ * - SIGEV_NONE timers which are never armed
+ * - Timers which expired, but the signal has not yet been
+ * delivered
*/
- timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
- ~REQUEUE_PENDING;
- timer->it_overrun_last = 0;
- timer->it_overrun = -1;
-
- if (val >= new_expires) {
- if (new_expires != 0) {
- /*
- * The designated time already passed, so we notify
- * immediately, even if the thread never runs to
- * accumulate more time on this clock.
- */
- cpu_timer_fire(timer);
- }
+ if (iv && ((timer->it_requeue_pending & REQUEUE_PENDING) || sigev_none))
+ expires = bump_cpu_timer(timer, now);
+ else
+ expires = cpu_timer_getexpires(&timer->it.cpu);
+ /*
+ * Expired interval timers cannot have a remaining time <= 0.
+ * The kernel has to move them forward so that the next
+ * timer expiry is > @now.
+ */
+ if (now < expires) {
+ itp->it_value = ns_to_timespec64(expires - now);
+ } else {
/*
- * Make sure we don't keep around the process wide cputime
- * counter or the tick dependency if they are not necessary.
+		 * A single-shot SIGEV_NONE timer must return 0 when it is
+ * expired! Timers which have a real signal delivery mode
+ * must return a remaining time greater than 0 because the
+ * signal has not yet been delivered.
*/
- sighand = lock_task_sighand(p, &flags);
- if (!sighand)
- goto out;
-
- if (!cpu_timer_queued(ctmr))
- trigger_base_recalc_expires(timer, p);
-
- unlock_task_sighand(p, &flags);
+ if (!sigev_none)
+ itp->it_value.tv_nsec = 1;
}
- out:
- rcu_read_unlock();
- if (old)
- old->it_interval = ns_to_timespec64(old_incr);
-
- return ret;
}
static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
{
clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
- struct cpu_timer *ctmr = &timer->it.cpu;
- u64 now, expires = cpu_timer_getexpires(ctmr);
struct task_struct *p;
+ u64 now;
rcu_read_lock();
p = cpu_timer_task_rcu(timer);
- if (!p)
- goto out;
+ if (p && cpu_timer_getexpires(&timer->it.cpu)) {
+ itp->it_interval = ktime_to_timespec64(timer->it_interval);
- /*
- * Easy part: convert the reload time.
- */
- itp->it_interval = ktime_to_timespec64(timer->it_interval);
-
- if (!expires)
- goto out;
-
- /*
- * Sample the clock to take the difference with the expiry time.
- */
- if (CPUCLOCK_PERTHREAD(timer->it_clock))
- now = cpu_clock_sample(clkid, p);
- else
- now = cpu_clock_sample_group(clkid, p, false);
+ if (CPUCLOCK_PERTHREAD(timer->it_clock))
+ now = cpu_clock_sample(clkid, p);
+ else
+ now = cpu_clock_sample_group(clkid, p, false);
- if (now < expires) {
- itp->it_value = ns_to_timespec64(expires - now);
- } else {
- /*
- * The timer should have expired already, but the firing
- * hasn't taken place yet. Say it's just about to expire.
- */
- itp->it_value.tv_nsec = 1;
- itp->it_value.tv_sec = 0;
+ __posix_cpu_timer_get(timer, itp, now);
}
-out:
rcu_read_unlock();
}
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index b924f0f096fa..4576aaed13b2 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -277,10 +277,17 @@ void posixtimer_rearm(struct kernel_siginfo *info)
unlock_timer(timr, flags);
}
-int posix_timer_event(struct k_itimer *timr, int si_private)
+int posix_timer_queue_signal(struct k_itimer *timr)
{
+ int ret, si_private = 0;
enum pid_type type;
- int ret;
+
+ lockdep_assert_held(&timr->it_lock);
+
+ timr->it_active = 0;
+ if (timr->it_interval)
+ si_private = ++timr->it_requeue_pending;
+
/*
* FIXME: if ->sigq is queued we can race with
* dequeue_signal()->posixtimer_rearm().
@@ -309,19 +316,13 @@ int posix_timer_event(struct k_itimer *timr, int si_private)
*/
static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
{
+ struct k_itimer *timr = container_of(timer, struct k_itimer, it.real.timer);
enum hrtimer_restart ret = HRTIMER_NORESTART;
- struct k_itimer *timr;
unsigned long flags;
- int si_private = 0;
- timr = container_of(timer, struct k_itimer, it.real.timer);
spin_lock_irqsave(&timr->it_lock, flags);
- timr->it_active = 0;
- if (timr->it_interval != 0)
- si_private = ++timr->it_requeue_pending;
-
- if (posix_timer_event(timr, si_private)) {
+ if (posix_timer_queue_signal(timr)) {
/*
* The signal was not queued due to SIG_IGN. As a
* consequence the timer is not going to be rearmed from
@@ -338,14 +339,14 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
* change to the signal handling code.
*
* For now let timers with an interval less than a
- * jiffie expire every jiffie and recheck for a
+ * jiffy expire every jiffy and recheck for a
* valid signal handler.
*
* This avoids interrupt starvation in case of a
* very small interval, which would expire the
* timer immediately again.
*
- * Moving now ahead of time by one jiffie tricks
+ * Moving now ahead of time by one jiffy tricks
* hrtimer_forward() to expire the timer later,
* while it still maintains the overrun accuracy
* for the price of a slight inconsistency in the
@@ -515,7 +516,7 @@ static int do_timer_create(clockid_t which_clock, struct sigevent *event,
spin_lock_irq(&current->sighand->siglock);
/* This makes the timer valid in the hash table */
WRITE_ONCE(new_timer->it_signal, current->signal);
- list_add(&new_timer->list, &current->signal->posix_timers);
+ hlist_add_head(&new_timer->list, &current->signal->posix_timers);
spin_unlock_irq(&current->sighand->siglock);
/*
* After unlocking sighand::siglock @new_timer is subject to
@@ -856,6 +857,23 @@ static struct k_itimer *timer_wait_running(struct k_itimer *timer,
return lock_timer(timer_id, flags);
}
+/*
+ * Set up the new interval and reset the signal delivery data
+ */
+void posix_timer_set_common(struct k_itimer *timer, struct itimerspec64 *new_setting)
+{
+ if (new_setting->it_value.tv_sec || new_setting->it_value.tv_nsec)
+ timer->it_interval = timespec64_to_ktime(new_setting->it_interval);
+ else
+ timer->it_interval = 0;
+
+ /* Prevent reloading in case there is a signal pending */
+ timer->it_requeue_pending = (timer->it_requeue_pending + 2) & ~REQUEUE_PENDING;
+ /* Reset overrun accounting */
+ timer->it_overrun_last = 0;
+ timer->it_overrun = -1LL;
+}
+
/* Set a POSIX.1b interval timer. */
int common_timer_set(struct k_itimer *timr, int flags,
struct itimerspec64 *new_setting,
@@ -878,15 +896,12 @@ int common_timer_set(struct k_itimer *timr, int flags,
return TIMER_RETRY;
timr->it_active = 0;
- timr->it_requeue_pending = (timr->it_requeue_pending + 2) &
- ~REQUEUE_PENDING;
- timr->it_overrun_last = 0;
+ posix_timer_set_common(timr, new_setting);
- /* Switch off the timer when it_value is zero */
+ /* Keep timer disarmed when it_value is zero */
if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
return 0;
- timr->it_interval = timespec64_to_ktime(new_setting->it_interval);
expires = timespec64_to_ktime(new_setting->it_value);
if (flags & TIMER_ABSTIME)
expires = timens_ktime_to_host(timr->it_clock, expires);
@@ -904,7 +919,7 @@ static int do_timer_settime(timer_t timer_id, int tmr_flags,
const struct k_clock *kc;
struct k_itimer *timr;
unsigned long flags;
- int error = 0;
+ int error;
if (!timespec64_valid(&new_spec64->it_interval) ||
!timespec64_valid(&new_spec64->it_value))
@@ -918,6 +933,9 @@ retry:
if (!timr)
return -EINVAL;
+ if (old_spec64)
+ old_spec64->it_interval = ktime_to_timespec64(timr->it_interval);
+
kc = timr->kclock;
if (WARN_ON_ONCE(!kc || !kc->timer_set))
error = -EINVAL;
@@ -1021,7 +1039,7 @@ retry_delete:
}
spin_lock(&current->sighand->siglock);
- list_del(&timer->list);
+ hlist_del(&timer->list);
spin_unlock(&current->sighand->siglock);
/*
* A concurrent lookup could check timer::it_signal lockless. It
@@ -1071,7 +1089,7 @@ retry_delete:
goto retry_delete;
}
- list_del(&timer->list);
+ hlist_del(&timer->list);
/*
* Setting timer::it_signal to NULL is technically not required
@@ -1092,22 +1110,19 @@ retry_delete:
*/
void exit_itimers(struct task_struct *tsk)
{
- struct list_head timers;
- struct k_itimer *tmr;
+ struct hlist_head timers;
- if (list_empty(&tsk->signal->posix_timers))
+ if (hlist_empty(&tsk->signal->posix_timers))
return;
/* Protect against concurrent read via /proc/$PID/timers */
spin_lock_irq(&tsk->sighand->siglock);
- list_replace_init(&tsk->signal->posix_timers, &timers);
+ hlist_move_list(&tsk->signal->posix_timers, &timers);
spin_unlock_irq(&tsk->sighand->siglock);
	/* The timers are no longer accessible via tsk::signal */
- while (!list_empty(&timers)) {
- tmr = list_first_entry(&timers, struct k_itimer, list);
- itimer_delete(tmr);
- }
+ while (!hlist_empty(&timers))
+ itimer_delete(hlist_entry(timers.first, struct k_itimer, list));
}
SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
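The posix_timer_queue_signal() change above centralizes bookkeeping that the hrtimer, alarmtimer and posix CPU timer expiry paths used to open-code: clearing it_active and bumping it_requeue_pending for interval timers before queueing the signal. A condensed standalone model of that helper (the struct and the signal queueing are simplified stand-ins, not the kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for struct k_itimer */
struct k_itimer {
	int  it_active;
	int  it_requeue_pending;
	long it_interval;		/* 0 means one-shot */
};

/* Stand-in for the signal queueing; returns true when the signal was ignored */
static bool send_sigqueue(struct k_itimer *timr, int si_private)
{
	printf("queue signal, si_private=%d\n", si_private);
	return false;
}

/*
 * Model of posix_timer_queue_signal(): the shared bookkeeping is done here
 * once, before queueing the signal, instead of in every expiry path.
 */
static bool posix_timer_queue_signal(struct k_itimer *timr)
{
	int si_private = 0;

	timr->it_active = 0;
	if (timr->it_interval)
		si_private = ++timr->it_requeue_pending;

	return send_sigqueue(timr, si_private);
}

int main(void)
{
	struct k_itimer t = { .it_active = 1, .it_interval = 1000 };

	/* An expiry path now only needs the single helper call */
	if (posix_timer_queue_signal(&t) && t.it_interval)
		puts("signal ignored: caller rearms the timer");
	return 0;
}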
diff --git a/kernel/time/posix-timers.h b/kernel/time/posix-timers.h
index f32a2ebba9b8..4784ea65f685 100644
--- a/kernel/time/posix-timers.h
+++ b/kernel/time/posix-timers.h
@@ -36,10 +36,11 @@ extern const struct k_clock clock_process;
extern const struct k_clock clock_thread;
extern const struct k_clock alarm_clock;
-int posix_timer_event(struct k_itimer *timr, int si_private);
+int posix_timer_queue_signal(struct k_itimer *timr);
void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting);
int common_timer_set(struct k_itimer *timr, int flags,
struct itimerspec64 *new_setting,
struct itimerspec64 *old_setting);
+void posix_timer_set_common(struct k_itimer *timer, struct itimerspec64 *new_setting);
int common_timer_del(struct k_itimer *timer);
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index b4843099a8da..ed58eebb4e8f 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -1141,7 +1141,6 @@ void tick_broadcast_switch_to_oneshot(void)
#ifdef CONFIG_HOTPLUG_CPU
void hotplug_cpu__broadcast_tick_pull(int deadcpu)
{
- struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
struct clock_event_device *bc;
unsigned long flags;
@@ -1167,6 +1166,8 @@ void hotplug_cpu__broadcast_tick_pull(int deadcpu)
* device to avoid the starvation.
*/
if (tick_check_broadcast_expired()) {
+ struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
+
cpumask_clear_cpu(smp_processor_id(), tick_broadcast_force_mask);
tick_program_event(td->evtdev->next_event, 1);
}
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 2fa87dcfeda9..7e6f409bf311 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -2553,6 +2553,7 @@ int do_adjtimex(struct __kernel_timex *txc)
{
struct timekeeper *tk = &tk_core.timekeeper;
struct audit_ntp_data ad;
+ bool offset_set = false;
bool clock_set = false;
struct timespec64 ts;
unsigned long flags;
@@ -2575,6 +2576,7 @@ int do_adjtimex(struct __kernel_timex *txc)
if (ret)
return ret;
+ offset_set = delta.tv_sec != 0;
audit_tk_injoffset(delta);
}
@@ -2606,9 +2608,9 @@ int do_adjtimex(struct __kernel_timex *txc)
clock_set |= timekeeping_advance(TK_ADV_FREQ);
if (clock_set)
- clock_was_set(CLOCK_REALTIME);
+ clock_was_set(CLOCK_SET_WALL);
- ntp_notify_cmos_timer();
+ ntp_notify_cmos_timer(offset_set);
return ret;
}
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 48288dd4a102..0fc9d066a7be 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -289,7 +289,7 @@ static void timers_update_migration(void)
}
#ifdef CONFIG_SYSCTL
-static int timer_migration_handler(struct ctl_table *table, int write,
+static int timer_migration_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
@@ -365,7 +365,7 @@ static unsigned long round_jiffies_common(unsigned long j, int cpu,
rem = j % HZ;
/*
- * If the target jiffie is just after a whole second (which can happen
+ * If the target jiffy is just after a whole second (which can happen
* due to delays of the timer irq, long irq off times etc etc) then
* we should round down to the whole second, not up. Use 1/4th second
* as cutoff for this rounding as an extreme upper bound for this.
@@ -672,7 +672,7 @@ static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
* Set the next expiry time and kick the CPU so it
* can reevaluate the wheel:
*/
- base->next_expiry = bucket_expiry;
+ WRITE_ONCE(base->next_expiry, bucket_expiry);
base->timers_pending = true;
base->next_expiry_recalc = false;
trigger_dyntick_cpu(base, timer);
@@ -1561,6 +1561,8 @@ static inline void timer_base_unlock_expiry(struct timer_base *base)
* the waiter to acquire the lock and make progress.
*/
static void timer_sync_wait_running(struct timer_base *base)
+ __releases(&base->lock) __releases(&base->expiry_lock)
+ __acquires(&base->expiry_lock) __acquires(&base->lock)
{
if (atomic_read(&base->timer_waiters)) {
raw_spin_unlock_irq(&base->lock);
@@ -1898,7 +1900,7 @@ static int next_pending_bucket(struct timer_base *base, unsigned offset,
*
* Store next expiry time in base->next_expiry.
*/
-static void next_expiry_recalc(struct timer_base *base)
+static void timer_recalc_next_expiry(struct timer_base *base)
{
unsigned long clk, next, adj;
unsigned lvl, offset = 0;
@@ -1928,7 +1930,7 @@ static void next_expiry_recalc(struct timer_base *base)
* bits are zero, we look at the next level as is. If not we
* need to advance it by one because that's going to be the
* next expiring bucket in that level. base->clk is the next
- * expiring jiffie. So in case of:
+ * expiring jiffy. So in case of:
*
* LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
* 0 0 0 0 0 0
@@ -1964,7 +1966,7 @@ static void next_expiry_recalc(struct timer_base *base)
clk += adj;
}
- base->next_expiry = next;
+ WRITE_ONCE(base->next_expiry, next);
base->next_expiry_recalc = false;
base->timers_pending = !(next == base->clk + NEXT_TIMER_MAX_DELTA);
}
@@ -1993,7 +1995,7 @@ static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
return basem;
/*
- * Round up to the next jiffie. High resolution timers are
+ * Round up to the next jiffy. High resolution timers are
* off, so the hrtimers are expired in the tick and we need to
* make sure that this tick really expires the timer to avoid
* a ping pong of the nohz stop code.
@@ -2007,7 +2009,7 @@ static unsigned long next_timer_interrupt(struct timer_base *base,
unsigned long basej)
{
if (base->next_expiry_recalc)
- next_expiry_recalc(base);
+ timer_recalc_next_expiry(base);
/*
* Move next_expiry for the empty base into the future to prevent an
@@ -2018,7 +2020,7 @@ static unsigned long next_timer_interrupt(struct timer_base *base,
* easy comparable to find out which base holds the first pending timer.
*/
if (!base->timers_pending)
- base->next_expiry = basej + NEXT_TIMER_MAX_DELTA;
+ WRITE_ONCE(base->next_expiry, basej + NEXT_TIMER_MAX_DELTA);
return base->next_expiry;
}
@@ -2252,7 +2254,7 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
base_global, &tevt);
/*
- * If the next event is only one jiffie ahead there is no need to call
+ * If the next event is only one jiffy ahead there is no need to call
* timer migration hierarchy related functions. The value for the next
* global timer in @tevt struct equals then KTIME_MAX. This is also
* true, when the timer base is idle.
@@ -2411,7 +2413,7 @@ static inline void __run_timers(struct timer_base *base)
* jiffies to avoid endless requeuing to current jiffies.
*/
base->clk++;
- next_expiry_recalc(base);
+ timer_recalc_next_expiry(base);
while (levels--)
expire_timers(base, heads + levels);
@@ -2440,7 +2442,7 @@ static void run_timer_base(int index)
/*
* This function runs timers and the timer-tq in bottom half context.
*/
-static __latent_entropy void run_timer_softirq(struct softirq_action *h)
+static __latent_entropy void run_timer_softirq(void)
{
run_timer_base(BASE_LOCAL);
if (IS_ENABLED(CONFIG_NO_HZ_COMMON)) {
@@ -2462,8 +2464,40 @@ static void run_local_timers(void)
hrtimer_run_queues();
for (int i = 0; i < NR_BASES; i++, base++) {
- /* Raise the softirq only if required. */
- if (time_after_eq(jiffies, base->next_expiry) ||
+ /*
+ * Raise the softirq only if required.
+ *
+ * timer_base::next_expiry can be written by a remote CPU while
+		 * holding the lock. If this write happens at the same time as
+		 * the lockless local read, a sanity checker could complain
+		 * about data corruption.
+ *
+ * There are two possible situations where
+ * timer_base::next_expiry is written by a remote CPU:
+ *
+ * 1. Remote CPU expires global timers of this CPU and updates
+ * timer_base::next_expiry of BASE_GLOBAL afterwards in
+ * next_timer_interrupt() or timer_recalc_next_expiry(). The
+ * worst outcome is a superfluous raise of the timer softirq
+ * when the not yet updated value is read.
+ *
+ * 2. A new first pinned timer is enqueued by a remote CPU
+ * and therefore timer_base::next_expiry of BASE_LOCAL is
+ * updated. When this update is missed, this isn't a
+ * problem, as an IPI is executed nevertheless when the CPU
+ * was idle before. When the CPU wasn't idle but the update
+ * is missed, then the timer would expire one jiffy late -
+ * bad luck.
+ *
+		 * Those unlikely corner cases, where the worst outcome is only
+		 * a one jiffy delay or a superfluous raise of the softirq, are
+		 * less expensive than always doing the check while holding
+		 * the lock.
+ *
+		 * Possible remote writers use WRITE_ONCE(), so the local
+		 * reader uses READ_ONCE().
+ */
+ if (time_after_eq(jiffies, READ_ONCE(base->next_expiry)) ||
(i == BASE_DEF && tmigr_requires_handle_remote())) {
raise_softirq(TIMER_SOFTIRQ);
return;
@@ -2730,7 +2764,7 @@ void __init init_timers(void)
*/
void msleep(unsigned int msecs)
{
- unsigned long timeout = msecs_to_jiffies(msecs) + 1;
+ unsigned long timeout = msecs_to_jiffies(msecs);
while (timeout)
timeout = schedule_timeout_uninterruptible(timeout);
@@ -2744,7 +2778,7 @@ EXPORT_SYMBOL(msleep);
*/
unsigned long msleep_interruptible(unsigned int msecs)
{
- unsigned long timeout = msecs_to_jiffies(msecs) + 1;
+ unsigned long timeout = msecs_to_jiffies(msecs);
while (timeout && !signal_pending(current))
timeout = schedule_timeout_interruptible(timeout);
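The READ_ONCE()/WRITE_ONCE() pairing on timer_base::next_expiry introduced in run_local_timers() above can be modeled with a short standalone sketch. The macros below are simplified stand-ins built on C11 relaxed atomics (not the kernel's implementations), and time_after_eq() wrap-around handling is omitted: the remote writer publishes the value under its lock, the local tick path reads it locklessly, and a missed update costs at most a one jiffy delay or a spurious softirq raise.

#include <stdatomic.h>
#include <stdbool.h>

/* Simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE() */
#define WRITE_ONCE(var, val) atomic_store_explicit(&(var), (val), memory_order_relaxed)
#define READ_ONCE(var)       atomic_load_explicit(&(var), memory_order_relaxed)

struct timer_base_model {
	_Atomic unsigned long next_expiry;
	/* base->lock, timer wheel, etc. omitted */
};

/* Remote CPU, holding base->lock: publish a new first expiry */
static void enqueue_first_timer(struct timer_base_model *base, unsigned long bucket_expiry)
{
	WRITE_ONCE(base->next_expiry, bucket_expiry);
}

/* Local CPU in the tick, lockless: decide whether to raise the softirq */
static bool needs_softirq(struct timer_base_model *base, unsigned long jiffies_now)
{
	/* A missed update costs at most one jiffy of delay or a spurious raise */
	return jiffies_now >= READ_ONCE(base->next_expiry);
}

int main(void)
{
	struct timer_base_model base = { .next_expiry = 100 };

	enqueue_first_timer(&base, 50);
	return needs_softirq(&base, 60) ? 0 : 1;
}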
diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
index 84413114db5c..8d57f7686bb0 100644
--- a/kernel/time/timer_migration.c
+++ b/kernel/time/timer_migration.c
@@ -475,9 +475,54 @@ static bool tmigr_check_lonely(struct tmigr_group *group)
return bitmap_weight(&active, BIT_CNT) <= 1;
}
-typedef bool (*up_f)(struct tmigr_group *, struct tmigr_group *, void *);
+/**
+ * struct tmigr_walk - data required for walking the hierarchy
+ * @nextexp: Next CPU event expiry information which is handed into
+ * the timer migration code by the timer code
+ * (get_next_timer_interrupt())
+ * @firstexp: Contains the first event expiry information when
+ *			the hierarchy is completely idle. When the CPU itself
+ *			was the last one going idle, this information makes
+ *			sure that the CPU will be back in time. When using this
+ *			value in the remote expiry case, firstexp is stored in
+ *			the per CPU tmigr_cpu struct of the CPU which expires
+ *			remote timers. It is updated in the top level group
+ *			only. Be aware that a new top level of the hierarchy
+ *			can appear between the 'top level call' in
+ *			tmigr_update_events() and the check for the
+ * contain a value != KTIME_MAX even if it was not the
+ * final top level. This is not a problem, as the worst
+ * outcome is a CPU which might wake up a little early.
+ * @evt: Pointer to tmigr_event which needs to be queued (of idle
+ * child group)
+ * @childmask: groupmask of child group
+ * @remote: Is set, when the new timer path is executed in
+ * tmigr_handle_remote_cpu()
+ * @basej: timer base in jiffies
+ * @now: timer base monotonic
+ * @check:		is set if there is a need to handle remote timers;
+ *			required in tmigr_requires_handle_remote() only
+ * @tmc_active:	this flag indicates whether the CPU which triggers
+ * the hierarchy walk is !idle in the timer migration
+ * hierarchy. When the CPU is idle and the whole hierarchy is
+ * idle, only the first event of the top level has to be
+ * considered.
+ */
+struct tmigr_walk {
+ u64 nextexp;
+ u64 firstexp;
+ struct tmigr_event *evt;
+ u8 childmask;
+ bool remote;
+ unsigned long basej;
+ u64 now;
+ bool check;
+ bool tmc_active;
+};
+
+typedef bool (*up_f)(struct tmigr_group *, struct tmigr_group *, struct tmigr_walk *);
-static void __walk_groups(up_f up, void *data,
+static void __walk_groups(up_f up, struct tmigr_walk *data,
struct tmigr_cpu *tmc)
{
struct tmigr_group *child = NULL, *group = tmc->tmgroup;
@@ -490,64 +535,17 @@ static void __walk_groups(up_f up, void *data,
child = group;
group = group->parent;
+ data->childmask = child->groupmask;
} while (group);
}
-static void walk_groups(up_f up, void *data, struct tmigr_cpu *tmc)
+static void walk_groups(up_f up, struct tmigr_walk *data, struct tmigr_cpu *tmc)
{
lockdep_assert_held(&tmc->lock);
__walk_groups(up, data, tmc);
}
-/**
- * struct tmigr_walk - data required for walking the hierarchy
- * @nextexp: Next CPU event expiry information which is handed into
- * the timer migration code by the timer code
- * (get_next_timer_interrupt())
- * @firstexp: Contains the first event expiry information when last
- * active CPU of hierarchy is on the way to idle to make
- * sure CPU will be back in time.
- * @evt: Pointer to tmigr_event which needs to be queued (of idle
- * child group)
- * @childmask: childmask of child group
- * @remote: Is set, when the new timer path is executed in
- * tmigr_handle_remote_cpu()
- */
-struct tmigr_walk {
- u64 nextexp;
- u64 firstexp;
- struct tmigr_event *evt;
- u8 childmask;
- bool remote;
-};
-
-/**
- * struct tmigr_remote_data - data required for remote expiry hierarchy walk
- * @basej: timer base in jiffies
- * @now: timer base monotonic
- * @firstexp: returns expiry of the first timer in the idle timer
- * migration hierarchy to make sure the timer is handled in
- * time; it is stored in the per CPU tmigr_cpu struct of
- * CPU which expires remote timers
- * @childmask: childmask of child group
- * @check: is set if there is the need to handle remote timers;
- * required in tmigr_requires_handle_remote() only
- * @tmc_active: this flag indicates, whether the CPU which triggers
- * the hierarchy walk is !idle in the timer migration
- * hierarchy. When the CPU is idle and the whole hierarchy is
- * idle, only the first event of the top level has to be
- * considered.
- */
-struct tmigr_remote_data {
- unsigned long basej;
- u64 now;
- u64 firstexp;
- u8 childmask;
- bool check;
- bool tmc_active;
-};
-
/*
* Returns the next event of the timerqueue @group->events
*
@@ -618,10 +616,9 @@ static u64 tmigr_next_groupevt_expires(struct tmigr_group *group)
static bool tmigr_active_up(struct tmigr_group *group,
struct tmigr_group *child,
- void *ptr)
+ struct tmigr_walk *data)
{
union tmigr_state curstate, newstate;
- struct tmigr_walk *data = ptr;
bool walk_done;
u8 childmask;
@@ -649,8 +646,7 @@ static bool tmigr_active_up(struct tmigr_group *group,
} while (!atomic_try_cmpxchg(&group->migr_state, &curstate.state, newstate.state));
- if ((walk_done == false) && group->parent)
- data->childmask = group->childmask;
+ trace_tmigr_group_set_cpu_active(group, newstate, childmask);
/*
* The group is active (again). The group event might be still queued
@@ -666,8 +662,6 @@ static bool tmigr_active_up(struct tmigr_group *group,
*/
group->groupevt.ignore = true;
- trace_tmigr_group_set_cpu_active(group, newstate, childmask);
-
return walk_done;
}
@@ -675,7 +669,7 @@ static void __tmigr_cpu_activate(struct tmigr_cpu *tmc)
{
struct tmigr_walk data;
- data.childmask = tmc->childmask;
+ data.childmask = tmc->groupmask;
trace_tmigr_cpu_active(tmc);
@@ -860,10 +854,8 @@ unlock:
static bool tmigr_new_timer_up(struct tmigr_group *group,
struct tmigr_group *child,
- void *ptr)
+ struct tmigr_walk *data)
{
- struct tmigr_walk *data = ptr;
-
return tmigr_update_events(group, child, data);
}
@@ -995,9 +987,8 @@ unlock:
static bool tmigr_handle_remote_up(struct tmigr_group *group,
struct tmigr_group *child,
- void *ptr)
+ struct tmigr_walk *data)
{
- struct tmigr_remote_data *data = ptr;
struct tmigr_event *evt;
unsigned long jif;
u8 childmask;
@@ -1034,12 +1025,10 @@ again:
}
/*
- * Update of childmask for the next level and keep track of the expiry
- * of the first event that needs to be handled (group->next_expiry was
- * updated by tmigr_next_expired_groupevt(), next was set by
- * tmigr_handle_remote_cpu()).
+ * Keep track of the expiry of the first event that needs to be handled
+ * (group->next_expiry was updated by tmigr_next_expired_groupevt(),
+ * next was set by tmigr_handle_remote_cpu()).
*/
- data->childmask = group->childmask;
data->firstexp = group->next_expiry;
raw_spin_unlock_irq(&group->lock);
@@ -1055,12 +1044,12 @@ again:
void tmigr_handle_remote(void)
{
struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
- struct tmigr_remote_data data;
+ struct tmigr_walk data;
if (tmigr_is_not_available(tmc))
return;
- data.childmask = tmc->childmask;
+ data.childmask = tmc->groupmask;
data.firstexp = KTIME_MAX;
/*
@@ -1068,7 +1057,7 @@ void tmigr_handle_remote(void)
* in tmigr_handle_remote_up() anyway. Keep this check to speed up the
* return when nothing has to be done.
*/
- if (!tmigr_check_migrator(tmc->tmgroup, tmc->childmask)) {
+ if (!tmigr_check_migrator(tmc->tmgroup, tmc->groupmask)) {
/*
* If this CPU was an idle migrator, make sure to clear its wakeup
* value so it won't chase timers that have already expired elsewhere.
@@ -1097,9 +1086,8 @@ void tmigr_handle_remote(void)
static bool tmigr_requires_handle_remote_up(struct tmigr_group *group,
struct tmigr_group *child,
- void *ptr)
+ struct tmigr_walk *data)
{
- struct tmigr_remote_data *data = ptr;
u8 childmask;
childmask = data->childmask;
@@ -1118,7 +1106,7 @@ static bool tmigr_requires_handle_remote_up(struct tmigr_group *group,
* group before reading the next_expiry value.
*/
if (group->parent && !data->tmc_active)
- goto out;
+ return false;
/*
* The lock is required on 32bit architectures to read the variable
@@ -1143,9 +1131,6 @@ static bool tmigr_requires_handle_remote_up(struct tmigr_group *group,
raw_spin_unlock(&group->lock);
}
-out:
- /* Update of childmask for the next level */
- data->childmask = group->childmask;
return false;
}
@@ -1157,7 +1142,7 @@ out:
bool tmigr_requires_handle_remote(void)
{
struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
- struct tmigr_remote_data data;
+ struct tmigr_walk data;
unsigned long jif;
bool ret = false;
@@ -1165,7 +1150,7 @@ bool tmigr_requires_handle_remote(void)
return ret;
data.now = get_jiffies_update(&jif);
- data.childmask = tmc->childmask;
+ data.childmask = tmc->groupmask;
data.firstexp = KTIME_MAX;
data.tmc_active = !tmc->idle;
data.check = false;
@@ -1230,14 +1215,13 @@ u64 tmigr_cpu_new_timer(u64 nextexp)
if (nextexp != tmc->cpuevt.nextevt.expires ||
tmc->cpuevt.ignore) {
ret = tmigr_new_timer(tmc, nextexp);
+ /*
+ * Make sure the reevaluation of timers in idle path
+ * will not miss an event.
+ */
+ WRITE_ONCE(tmc->wakeup, ret);
}
}
- /*
- * Make sure the reevaluation of timers in idle path will not miss an
- * event.
- */
- WRITE_ONCE(tmc->wakeup, ret);
-
trace_tmigr_cpu_new_timer_idle(tmc, nextexp);
raw_spin_unlock(&tmc->lock);
return ret;
@@ -1245,10 +1229,9 @@ u64 tmigr_cpu_new_timer(u64 nextexp)
static bool tmigr_inactive_up(struct tmigr_group *group,
struct tmigr_group *child,
- void *ptr)
+ struct tmigr_walk *data)
{
union tmigr_state curstate, newstate, childstate;
- struct tmigr_walk *data = ptr;
bool walk_done;
u8 childmask;
@@ -1299,9 +1282,10 @@ static bool tmigr_inactive_up(struct tmigr_group *group,
WARN_ON_ONCE((newstate.migrator != TMIGR_NONE) && !(newstate.active));
- if (atomic_try_cmpxchg(&group->migr_state, &curstate.state,
- newstate.state))
+ if (atomic_try_cmpxchg(&group->migr_state, &curstate.state, newstate.state)) {
+ trace_tmigr_group_set_cpu_inactive(group, newstate, childmask);
break;
+ }
/*
* The memory barrier is paired with the cmpxchg() in
@@ -1317,22 +1301,6 @@ static bool tmigr_inactive_up(struct tmigr_group *group,
/* Event Handling */
tmigr_update_events(group, child, data);
- if (group->parent && (walk_done == false))
- data->childmask = group->childmask;
-
- /*
- * data->firstexp was set by tmigr_update_events() and contains the
- * expiry of the first global event which needs to be handled. It
- * differs from KTIME_MAX if:
- * - group is the top level group and
- * - group is idle (which means CPU was the last active CPU in the
- * hierarchy) and
- * - there is a pending event in the hierarchy
- */
- WARN_ON_ONCE(data->firstexp != KTIME_MAX && group->parent);
-
- trace_tmigr_group_set_cpu_inactive(group, newstate, childmask);
-
return walk_done;
}
@@ -1341,7 +1309,7 @@ static u64 __tmigr_cpu_deactivate(struct tmigr_cpu *tmc, u64 nextexp)
struct tmigr_walk data = { .nextexp = nextexp,
.firstexp = KTIME_MAX,
.evt = &tmc->cpuevt,
- .childmask = tmc->childmask };
+ .childmask = tmc->groupmask };
/*
* If nextexp is KTIME_MAX, the CPU event will be ignored because the
@@ -1400,7 +1368,7 @@ u64 tmigr_cpu_deactivate(u64 nextexp)
* the only one in the level 0 group; and if it is the
* only one in level 0 group, but there are more than a
* single group active on the way to top level)
- * * nextevt - when CPU is offline and has to handle timer on his own
+ * * nextevt - when CPU is offline and has to handle timer on its own
* or when on the way to top in every group only a single
* child is active but @nextevt is before the lowest
* next_expiry encountered while walking up to top level.
@@ -1419,7 +1387,7 @@ u64 tmigr_quick_check(u64 nextevt)
if (WARN_ON_ONCE(tmc->idle))
return nextevt;
- if (!tmigr_check_migrator_and_lonely(tmc->tmgroup, tmc->childmask))
+ if (!tmigr_check_migrator_and_lonely(tmc->tmgroup, tmc->groupmask))
return KTIME_MAX;
do {
@@ -1442,6 +1410,66 @@ u64 tmigr_quick_check(u64 nextevt)
return KTIME_MAX;
}
+/*
+ * tmigr_trigger_active() - trigger a CPU to become active again
+ *
+ * This function is executed on a CPU which is part of cpu_online_mask, when the
+ * last active CPU in the hierarchy is offlining. With this, it is ensured that
+ * the other CPU is active and takes over the migrator duty.
+ */
+static long tmigr_trigger_active(void *unused)
+{
+ struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
+
+ WARN_ON_ONCE(!tmc->online || tmc->idle);
+
+ return 0;
+}
+
+static int tmigr_cpu_offline(unsigned int cpu)
+{
+ struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
+ int migrator;
+ u64 firstexp;
+
+ raw_spin_lock_irq(&tmc->lock);
+ tmc->online = false;
+ WRITE_ONCE(tmc->wakeup, KTIME_MAX);
+
+ /*
+	 * The CPU has to handle the local events on its own when on the way
+	 * to offline; therefore the nextevt value is set to KTIME_MAX
+ */
+ firstexp = __tmigr_cpu_deactivate(tmc, KTIME_MAX);
+ trace_tmigr_cpu_offline(tmc);
+ raw_spin_unlock_irq(&tmc->lock);
+
+ if (firstexp != KTIME_MAX) {
+ migrator = cpumask_any_but(cpu_online_mask, cpu);
+ work_on_cpu(migrator, tmigr_trigger_active, NULL);
+ }
+
+ return 0;
+}
+
+static int tmigr_cpu_online(unsigned int cpu)
+{
+ struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
+
+ /* Check whether CPU data was successfully initialized */
+ if (WARN_ON_ONCE(!tmc->tmgroup))
+ return -EINVAL;
+
+ raw_spin_lock_irq(&tmc->lock);
+ trace_tmigr_cpu_online(tmc);
+ tmc->idle = timer_base_is_idle();
+ if (!tmc->idle)
+ __tmigr_cpu_activate(tmc);
+ tmc->online = true;
+ raw_spin_unlock_irq(&tmc->lock);
+ return 0;
+}
+
static void tmigr_init_group(struct tmigr_group *group, unsigned int lvl,
int node)
{
@@ -1514,21 +1542,25 @@ static struct tmigr_group *tmigr_get_group(unsigned int cpu, int node,
}
static void tmigr_connect_child_parent(struct tmigr_group *child,
- struct tmigr_group *parent)
+ struct tmigr_group *parent,
+ bool activate)
{
- union tmigr_state childstate;
+ struct tmigr_walk data;
raw_spin_lock_irq(&child->lock);
raw_spin_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING);
child->parent = parent;
- child->childmask = BIT(parent->num_children++);
+ child->groupmask = BIT(parent->num_children++);
raw_spin_unlock(&parent->lock);
raw_spin_unlock_irq(&child->lock);
trace_tmigr_connect_child_parent(child);
+ if (!activate)
+ return;
+
/*
* To prevent inconsistent states, active children need to be active in
* the new parent as well. Inactive children are already marked inactive
@@ -1544,21 +1576,24 @@ static void tmigr_connect_child_parent(struct tmigr_group *child,
* child to the new parent. So tmigr_connect_child_parent() is
* executed with the formerly top level group (child) and the newly
* created group (parent).
+ *
+ * * It is ensured that the child is active, as this setup path is
+	 *   executed in the hotplug prepare callback. This is executed by an
+	 *   already connected and !idle CPU. Even if all other CPUs go idle,
+	 *   the CPU executing the setup will be responsible up to the current
+	 *   top level group. And the next time it goes inactive, it will
+	 *   release the new childmask and parent to subsequent walkers through
+	 *   this @child. Therefore propagate the active state unconditionally.
*/
- childstate.state = atomic_read(&child->migr_state);
- if (childstate.migrator != TMIGR_NONE) {
- struct tmigr_walk data;
-
- data.childmask = child->childmask;
+ data.childmask = child->groupmask;
- /*
- * There is only one new level per time. When connecting the
- * child and the parent and set the child active when the parent
- * is inactive, the parent needs to be the uppermost
- * level. Otherwise there went something wrong!
- */
- WARN_ON(!tmigr_active_up(parent, child, &data) && parent->parent);
- }
+ /*
+ * There is only one new level per time (which is protected by
+ * tmigr_mutex). When connecting the child and the parent and set the
+ * child active when the parent is inactive, the parent needs to be the
+ * uppermost level. Otherwise there went something wrong!
+ */
+ WARN_ON(!tmigr_active_up(parent, child, &data) && parent->parent);
}
static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
@@ -1611,12 +1646,12 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
* Update tmc -> group / child -> group connection
*/
if (i == 0) {
- struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
+ struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu);
raw_spin_lock_irq(&group->lock);
tmc->tmgroup = group;
- tmc->childmask = BIT(group->num_children++);
+ tmc->groupmask = BIT(group->num_children++);
raw_spin_unlock_irq(&group->lock);
@@ -1626,7 +1661,8 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
continue;
} else {
child = stack[i - 1];
- tmigr_connect_child_parent(child, group);
+ /* Will be activated at online time */
+ tmigr_connect_child_parent(child, group, false);
}
/* check if uppermost level was newly created */
@@ -1637,12 +1673,21 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
lvllist = &tmigr_level_list[top];
if (group->num_children == 1 && list_is_singular(lvllist)) {
+ /*
+ * The target CPU must never do the prepare work, except
+ * on early boot when the boot CPU is the target. Otherwise
+ * it may spuriously activate the old top level group inside
+			 * the new one (regardless of whether the old top level
+			 * group is active or not) and/or release an
+			 * uninitialized childmask.
+ */
+ WARN_ON_ONCE(cpu == raw_smp_processor_id());
+
lvllist = &tmigr_level_list[top - 1];
list_for_each_entry(child, lvllist, list) {
if (child->parent)
continue;
- tmigr_connect_child_parent(child, group);
+ tmigr_connect_child_parent(child, group, true);
}
}
}
@@ -1664,80 +1709,31 @@ static int tmigr_add_cpu(unsigned int cpu)
return ret;
}
-static int tmigr_cpu_online(unsigned int cpu)
-{
- struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
- int ret;
-
- /* First online attempt? Initialize CPU data */
- if (!tmc->tmgroup) {
- raw_spin_lock_init(&tmc->lock);
-
- ret = tmigr_add_cpu(cpu);
- if (ret < 0)
- return ret;
-
- if (tmc->childmask == 0)
- return -EINVAL;
-
- timerqueue_init(&tmc->cpuevt.nextevt);
- tmc->cpuevt.nextevt.expires = KTIME_MAX;
- tmc->cpuevt.ignore = true;
- tmc->cpuevt.cpu = cpu;
-
- tmc->remote = false;
- WRITE_ONCE(tmc->wakeup, KTIME_MAX);
- }
- raw_spin_lock_irq(&tmc->lock);
- trace_tmigr_cpu_online(tmc);
- tmc->idle = timer_base_is_idle();
- if (!tmc->idle)
- __tmigr_cpu_activate(tmc);
- tmc->online = true;
- raw_spin_unlock_irq(&tmc->lock);
- return 0;
-}
-
-/*
- * tmigr_trigger_active() - trigger a CPU to become active again
- *
- * This function is executed on a CPU which is part of cpu_online_mask, when the
- * last active CPU in the hierarchy is offlining. With this, it is ensured that
- * the other CPU is active and takes over the migrator duty.
- */
-static long tmigr_trigger_active(void *unused)
+static int tmigr_cpu_prepare(unsigned int cpu)
{
- struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
+ struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu);
+ int ret = 0;
- WARN_ON_ONCE(!tmc->online || tmc->idle);
-
- return 0;
-}
-
-static int tmigr_cpu_offline(unsigned int cpu)
-{
- struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
- int migrator;
- u64 firstexp;
+ /* Not first online attempt? */
+ if (tmc->tmgroup)
+ return ret;
- raw_spin_lock_irq(&tmc->lock);
- tmc->online = false;
+ raw_spin_lock_init(&tmc->lock);
+ timerqueue_init(&tmc->cpuevt.nextevt);
+ tmc->cpuevt.nextevt.expires = KTIME_MAX;
+ tmc->cpuevt.ignore = true;
+ tmc->cpuevt.cpu = cpu;
+ tmc->remote = false;
WRITE_ONCE(tmc->wakeup, KTIME_MAX);
- /*
- * CPU has to handle the local events on his own, when on the way to
- * offline; Therefore nextevt value is set to KTIME_MAX
- */
- firstexp = __tmigr_cpu_deactivate(tmc, KTIME_MAX);
- trace_tmigr_cpu_offline(tmc);
- raw_spin_unlock_irq(&tmc->lock);
+ ret = tmigr_add_cpu(cpu);
+ if (ret < 0)
+ return ret;
- if (firstexp != KTIME_MAX) {
- migrator = cpumask_any_but(cpu_online_mask, cpu);
- work_on_cpu(migrator, tmigr_trigger_active, NULL);
- }
+ if (tmc->groupmask == 0)
+ return -EINVAL;
- return 0;
+ return ret;
}
static int __init tmigr_init(void)
@@ -1796,6 +1792,11 @@ static int __init tmigr_init(void)
tmigr_hierarchy_levels, TMIGR_CHILDREN_PER_GROUP,
tmigr_crossnode_level);
+ ret = cpuhp_setup_state(CPUHP_TMIGR_PREPARE, "tmigr:prepare",
+ tmigr_cpu_prepare, NULL);
+ if (ret)
+ goto err;
+
ret = cpuhp_setup_state(CPUHP_AP_TMIGR_ONLINE, "tmigr:online",
tmigr_cpu_online, tmigr_cpu_offline);
if (ret)
@@ -1807,4 +1808,4 @@ err:
pr_err("Timer migration setup failed\n");
return ret;
}
-late_initcall(tmigr_init);
+early_initcall(tmigr_init);
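The up_f callback now takes a struct tmigr_walk pointer instead of void *, so the handlers no longer cast, and __walk_groups() updates data->childmask once per level on behalf of every walker. A tiny standalone sketch of that pattern, with heavily simplified types that stand in for the kernel structures:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the timer migration hierarchy types */
struct tmigr_group {
	struct tmigr_group *parent;
	unsigned char groupmask;	/* mask of this group in its parent */
};

struct tmigr_walk {
	unsigned char childmask;	/* updated per level by the walk itself */
	/* nextexp, firstexp, evt, ... omitted */
};

/* Typed callback: no void * casting inside the handlers anymore */
typedef bool (*up_f)(struct tmigr_group *group, struct tmigr_group *child,
		     struct tmigr_walk *data);

static void walk_groups(up_f up, struct tmigr_walk *data, struct tmigr_group *start)
{
	struct tmigr_group *child = NULL, *group = start;

	do {
		if (up(group, child, data))
			break;
		child = group;
		group = group->parent;
		/* Common to all walkers: childmask for the next level */
		data->childmask = child->groupmask;
	} while (group);
}

static bool print_up(struct tmigr_group *group, struct tmigr_group *child,
		     struct tmigr_walk *data)
{
	(void)group;
	(void)child;
	printf("level walk, childmask=0x%x\n", data->childmask);
	return false;	/* keep walking */
}

int main(void)
{
	struct tmigr_group top  = { .parent = NULL, .groupmask = 0 };
	struct tmigr_group lvl0 = { .parent = &top, .groupmask = 0x1 };
	struct tmigr_walk data  = { .childmask = 0x1 };

	walk_groups(print_up, &data, &lvl0);
	return 0;
}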
diff --git a/kernel/time/timer_migration.h b/kernel/time/timer_migration.h
index 6c37d94a37d9..154accc7a543 100644
--- a/kernel/time/timer_migration.h
+++ b/kernel/time/timer_migration.h
@@ -22,7 +22,17 @@ struct tmigr_event {
* struct tmigr_group - timer migration hierarchy group
* @lock: Lock protecting the event information and group hierarchy
* information during setup
- * @parent: Pointer to the parent group
+ * @parent: Pointer to the parent group. Pointer is updated when a
+ * new hierarchy level is added because of a CPU coming
+ * online the first time. Once it is set, the pointer will
+ *			not be removed or updated. Accessing the parent pointer
+ *			locklessly to decide whether to abort a propagation is
+ *			not a problem. The worst outcome is an unnecessary/early
+ *			CPU wake up. But do not access the parent pointer
+ *			several times in the same 'action' (like activation,
+ *			deactivation, check for remote expiry, ...) without
+ *			holding the lock, as it is not guaranteed that the value
+ *			will not change.
* @groupevt: Next event of the group which is only used when the
* group is !active. The group event is then queued into
* the parent timer queue.
@@ -41,9 +51,8 @@ struct tmigr_event {
* @num_children: Counter of group children to make sure the group is only
* filled with TMIGR_CHILDREN_PER_GROUP; Required for setup
* only
- * @childmask: childmask of the group in the parent group; is set
- * during setup and will never change; can be read
- * lockless
+ * @groupmask: mask of the group in the parent group; is set during
+ * setup and will never change; can be read lockless
* @list: List head that is added to the per level
* tmigr_level_list; is required during setup when a
* new group needs to be connected to the existing
@@ -59,7 +68,7 @@ struct tmigr_group {
unsigned int level;
int numa_node;
unsigned int num_children;
- u8 childmask;
+ u8 groupmask;
struct list_head list;
};
@@ -79,7 +88,7 @@ struct tmigr_group {
* hierarchy
* @remote: Is set when timers of the CPU are expired remotely
* @tmgroup: Pointer to the parent group
- * @childmask: childmask of tmigr_cpu in the parent group
+ * @groupmask: mask of tmigr_cpu in the parent group
* @wakeup: Stores the first timer when the timer migration
* hierarchy is completely idle and remote expiry was done;
* is returned to timer code in the idle path and is only
@@ -92,7 +101,7 @@ struct tmigr_cpu {
bool idle;
bool remote;
struct tmigr_group *tmgroup;
- u8 childmask;
+ u8 groupmask;
u64 wakeup;
struct tmigr_event cpuevt;
};
@@ -108,8 +117,8 @@ union tmigr_state {
u32 state;
/**
* struct - split state of tmigr_group
- * @active: Contains each childmask bit of the active children
- * @migrator: Contains childmask of the child which is migrator
+ * @active: Contains each mask bit of the active children
+ * @migrator: Contains mask of the child which is migrator
* @seq: Sequence counter needs to be increased when an update
* to the tmigr_state is done. It prevents a race when
* updates in the child groups are propagated in changed