Diffstat (limited to 'kernel/time/tick-sched.c')
-rw-r--r--    kernel/time/tick-sched.c    70
1 file changed, 35 insertions, 35 deletions
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 084b79f5917e..536ada80f6dd 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -157,52 +157,50 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
 cpumask_var_t tick_nohz_full_mask;
 cpumask_var_t housekeeping_mask;
 bool tick_nohz_full_running;
-static unsigned long tick_dep_mask;
+static atomic_t tick_dep_mask;
 
-static void trace_tick_dependency(unsigned long dep)
+static bool check_tick_dependency(atomic_t *dep)
 {
-        if (dep & TICK_DEP_MASK_POSIX_TIMER) {
+        int val = atomic_read(dep);
+
+        if (val & TICK_DEP_MASK_POSIX_TIMER) {
                 trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
-                return;
+                return true;
         }
 
-        if (dep & TICK_DEP_MASK_PERF_EVENTS) {
+        if (val & TICK_DEP_MASK_PERF_EVENTS) {
                 trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
-                return;
+                return true;
         }
 
-        if (dep & TICK_DEP_MASK_SCHED) {
+        if (val & TICK_DEP_MASK_SCHED) {
                 trace_tick_stop(0, TICK_DEP_MASK_SCHED);
-                return;
+                return true;
         }
 
-        if (dep & TICK_DEP_MASK_CLOCK_UNSTABLE)
+        if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
                 trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
+                return true;
+        }
+
+        return false;
 }
 
 static bool can_stop_full_tick(struct tick_sched *ts)
 {
         WARN_ON_ONCE(!irqs_disabled());
 
-        if (tick_dep_mask) {
-                trace_tick_dependency(tick_dep_mask);
+        if (check_tick_dependency(&tick_dep_mask))
                 return false;
-        }
 
-        if (ts->tick_dep_mask) {
-                trace_tick_dependency(ts->tick_dep_mask);
+        if (check_tick_dependency(&ts->tick_dep_mask))
                 return false;
-        }
 
-        if (current->tick_dep_mask) {
-                trace_tick_dependency(current->tick_dep_mask);
+        if (check_tick_dependency(&current->tick_dep_mask))
                 return false;
-        }
 
-        if (current->signal->tick_dep_mask) {
-                trace_tick_dependency(current->signal->tick_dep_mask);
+        if (check_tick_dependency(&current->signal->tick_dep_mask))
                 return false;
-        }
 
         return true;
 }
@@ -259,12 +257,12 @@ static void tick_nohz_full_kick_all(void)
         preempt_enable();
 }
 
-static void tick_nohz_dep_set_all(unsigned long *dep,
+static void tick_nohz_dep_set_all(atomic_t *dep,
                                   enum tick_dep_bits bit)
 {
-        unsigned long prev;
+        int prev;
 
-        prev = fetch_or(dep, BIT_MASK(bit));
+        prev = atomic_fetch_or(BIT(bit), dep);
         if (!prev)
                 tick_nohz_full_kick_all();
 }
@@ -280,7 +278,7 @@ void tick_nohz_dep_set(enum tick_dep_bits bit)
 
 void tick_nohz_dep_clear(enum tick_dep_bits bit)
 {
-        clear_bit(bit, &tick_dep_mask);
+        atomic_andnot(BIT(bit), &tick_dep_mask);
 }
 
 /*
@@ -289,12 +287,12 @@ void tick_nohz_dep_clear(enum tick_dep_bits bit)
  */
 void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
 {
-        unsigned long prev;
+        int prev;
         struct tick_sched *ts;
 
         ts = per_cpu_ptr(&tick_cpu_sched, cpu);
 
-        prev = fetch_or(&ts->tick_dep_mask, BIT_MASK(bit));
+        prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask);
         if (!prev) {
                 preempt_disable();
                 /* Perf needs local kick that is NMI safe */
@@ -313,7 +311,7 @@ void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
 {
         struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);
 
-        clear_bit(bit, &ts->tick_dep_mask);
+        atomic_andnot(BIT(bit), &ts->tick_dep_mask);
 }
 
 /*
@@ -331,7 +329,7 @@ void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
 
 void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
 {
-        clear_bit(bit, &tsk->tick_dep_mask);
+        atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
 }
 
 /*
@@ -345,7 +343,7 @@ void tick_nohz_dep_set_signal(struct signal_struct *sig, enum tick_dep_bits bit)
 
 void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
 {
-        clear_bit(bit, &sig->tick_dep_mask);
+        atomic_andnot(BIT(bit), &sig->tick_dep_mask);
 }
 
 /*
@@ -366,7 +364,8 @@ void __tick_nohz_task_switch(void)
         ts = this_cpu_ptr(&tick_cpu_sched);
 
         if (ts->tick_stopped) {
-                if (current->tick_dep_mask || current->signal->tick_dep_mask)
+                if (atomic_read(&current->tick_dep_mask) ||
+                    atomic_read(&current->signal->tick_dep_mask))
                         tick_nohz_full_kick();
         }
 out:
@@ -777,6 +776,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
         if (!ts->tick_stopped) {
                 nohz_balance_enter_idle(cpu);
                 calc_load_enter_idle();
+                cpu_load_update_nohz_start();
 
                 ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
                 ts->tick_stopped = 1;
@@ -803,11 +803,11 @@ out:
         return tick;
 }
 
-static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now, int active)
+static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
 {
         /* Update jiffies first */
         tick_do_update_jiffies64(now);
-        update_cpu_load_nohz(active);
+        cpu_load_update_nohz_stop();
 
         calc_load_exit_idle();
         touch_softlockup_watchdog_sched();
@@ -834,7 +834,7 @@ static void tick_nohz_full_update_tick(struct tick_sched *ts)
         if (can_stop_full_tick(ts))
                 tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
         else if (ts->tick_stopped)
-                tick_nohz_restart_sched_tick(ts, ktime_get(), 1);
+                tick_nohz_restart_sched_tick(ts, ktime_get());
 #endif
 }
 
@@ -1025,7 +1025,7 @@ void tick_nohz_idle_exit(void)
                 tick_nohz_stop_idle(ts, now);
 
         if (ts->tick_stopped) {
-                tick_nohz_restart_sched_tick(ts, now, 0);
+                tick_nohz_restart_sched_tick(ts, now);
                 tick_nohz_account_idle_ticks(ts);
         }
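
The core of the change above is the conversion of the tick dependency masks from an unsigned long manipulated with fetch_or()/clear_bit() to an atomic_t manipulated with atomic_fetch_or()/atomic_andnot(), where the fetch-or return value tells the caller whether it installed the very first dependency bit and therefore needs to kick the tick. The snippet below is a minimal userspace sketch of that same fetch-or / and-not idiom using C11 <stdatomic.h>; the demo_* names and the local BIT() macro are illustrative only and are not part of the kernel patch. Because the kernel's atomic_andnot() does not return the old value, the closest standard C equivalent used here is atomic_fetch_and() with a negated mask.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define BIT(nr) (1U << (nr))   /* local stand-in for the kernel's BIT() */

/* Hypothetical dependency mask, analogous in spirit to tick_dep_mask. */
static atomic_uint demo_dep_mask;

/* Set a dependency bit; return true if the mask was previously empty,
 * i.e. this caller is the one who would have to kick the tick. */
static bool demo_dep_set(unsigned int bit)
{
        unsigned int prev = atomic_fetch_or(&demo_dep_mask, BIT(bit));
        return prev == 0;
}

/* Clear a dependency bit (userspace analogue of atomic_andnot()). */
static void demo_dep_clear(unsigned int bit)
{
        atomic_fetch_and(&demo_dep_mask, ~BIT(bit));
}

int main(void)
{
        if (demo_dep_set(0))
                puts("first dependency installed -> would kick the tick");

        demo_dep_set(1);
        demo_dep_clear(0);

        printf("mask now: %#x\n", atomic_load(&demo_dep_mask));
        return 0;
}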