author		Ingo Molnar <[email protected]>	2020-07-31 12:12:21 +0200
committer	Ingo Molnar <[email protected]>	2020-07-31 12:13:21 +0200
commit		adb334d17858d8b679a41f7f2cd230e5c6accc0a (patch)
tree		fd4fd188aedd4412b6d1097eb58e0a479bb97596 /kernel
parent		f3020b8891b890b48d9e1a83241e3cce518427c1 (diff)
parent		f87032aec41e0b00ae5fa9103eb8e7b2d1f8416b (diff)
Merge branch 'WIP.x86/entry' into x86/entry, to merge the latest generic code and resolve conflicts
Pick up and resolve the NMI entry code changes from the locking tree,
and also pick up the latest two fixes from tip:core/entry.
Signed-off-by: Ingo Molnar <[email protected]>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/entry/common.c		4
-rw-r--r--	kernel/kcsan/core.c		5
-rw-r--r--	kernel/kcsan/report.c		9
-rw-r--r--	kernel/locking/lockdep.c	46
4 files changed, 51 insertions, 13 deletions
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index 495f5c051b03..9852e0d62d95 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -256,7 +256,7 @@ noinstr void irqentry_exit_to_user_mode(struct pt_regs *regs)
 	exit_to_user_mode();
 }
 
-irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs)
+noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
 {
 	irqentry_state_t ret = {
 		.exit_rcu = false,
@@ -333,7 +333,7 @@ void irqentry_exit_cond_resched(void)
 	}
 }
 
-void noinstr irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
+noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
 {
 	lockdep_assert_irqs_disabled();
 
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 15f67949d11e..732623c30359 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -397,8 +397,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 	}
 
 	if (!kcsan_interrupt_watcher)
-		/* Use raw to avoid lockdep recursion via IRQ flags tracing. */
-		raw_local_irq_save(irq_flags);
+		local_irq_save(irq_flags);
 
 	watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write);
 	if (watchpoint == NULL) {
@@ -539,7 +538,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 	kcsan_counter_dec(KCSAN_COUNTER_USED_WATCHPOINTS);
 out_unlock:
 	if (!kcsan_interrupt_watcher)
-		raw_local_irq_restore(irq_flags);
+		local_irq_restore(irq_flags);
 out:
 	user_access_restore(ua_flags);
 }
diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c
index ac5f8345bae9..6b2fb1a6d8cd 100644
--- a/kernel/kcsan/report.c
+++ b/kernel/kcsan/report.c
@@ -606,10 +606,11 @@ void kcsan_report(const volatile void *ptr, size_t size, int access_type,
 		goto out;
 
 	/*
-	 * With TRACE_IRQFLAGS, lockdep's IRQ trace state becomes corrupted if
-	 * we do not turn off lockdep here; this could happen due to recursion
-	 * into lockdep via KCSAN if we detect a race in utilities used by
-	 * lockdep.
+	 * Because we may generate reports when we're in scheduler code, the use
+	 * of printk() could deadlock. Until such time that all printing code
+	 * called in print_report() is scheduler-safe, accept the risk, and just
+	 * get our message out. As such, also disable lockdep to hide the
+	 * warning, and avoid disabling lockdep for the rest of the kernel.
 	 */
 	lockdep_off();
 
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 29a8de4c50b9..d595623c4b34 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -395,7 +395,7 @@ void lockdep_init_task(struct task_struct *task)
 
 static __always_inline void lockdep_recursion_finish(void)
 {
-	if (WARN_ON_ONCE(--current->lockdep_recursion))
+	if (WARN_ON_ONCE((--current->lockdep_recursion) & LOCKDEP_RECURSION_MASK))
 		current->lockdep_recursion = 0;
 }
 
@@ -3646,7 +3646,16 @@ static void __trace_hardirqs_on_caller(void)
  */
 void lockdep_hardirqs_on_prepare(unsigned long ip)
 {
-	if (unlikely(!debug_locks || current->lockdep_recursion))
+	if (unlikely(!debug_locks))
+		return;
+
+	/*
+	 * NMIs do not (and cannot) track lock dependencies, nothing to do.
+	 */
+	if (unlikely(in_nmi()))
+		return;
+
+	if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
 		return;
 
 	if (unlikely(current->hardirqs_enabled)) {
@@ -3692,7 +3701,27 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
 {
 	struct task_struct *curr = current;
 
-	if (unlikely(!debug_locks || curr->lockdep_recursion))
+	if (unlikely(!debug_locks))
+		return;
+
+	/*
+	 * NMIs can happen in the middle of local_irq_{en,dis}able() where the
+	 * tracking state and hardware state are out of sync.
+	 *
+	 * NMIs must save lockdep_hardirqs_enabled() to restore IRQ state from,
+	 * and not rely on hardware state like normal interrupts.
+	 */
+	if (unlikely(in_nmi())) {
+		/*
+		 * Skip:
+		 *  - recursion check, because NMI can hit lockdep;
+		 *  - hardware state check, because above;
+		 *  - chain_key check, see lockdep_hardirqs_on_prepare().
+		 */
+		goto skip_checks;
+	}
+
+	if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
 		return;
 
 	if (curr->hardirqs_enabled) {
@@ -3720,6 +3749,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
 	DEBUG_LOCKS_WARN_ON(current->hardirq_chain_key !=
 			    current->curr_chain_key);
 
+skip_checks:
 	/* we'll do an OFF -> ON transition: */
 	curr->hardirqs_enabled = 1;
 	curr->hardirq_enable_ip = ip;
@@ -3735,7 +3765,15 @@ void noinstr lockdep_hardirqs_off(unsigned long ip)
 {
 	struct task_struct *curr = current;
 
-	if (unlikely(!debug_locks || curr->lockdep_recursion))
+	if (unlikely(!debug_locks))
+		return;
+
+	/*
+	 * Matching lockdep_hardirqs_on(), allow NMIs in the middle of lockdep;
+	 * they will restore the software state. This ensures the software
+	 * state is consistent inside NMIs as well.
+	 */
+	if (unlikely(!in_nmi() && (current->lockdep_recursion & LOCKDEP_RECURSION_MASK)))
 		return;
 
 	/*
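The lockdep_recursion_finish() hunk above only makes sense together with a split-counter layout of current->lockdep_recursion: the low bits count instrumentation recursion, while lockdep_off() nesting is counted above LOCKDEP_RECURSION_MASK, so an NMI that toggles lockdep off and on cannot be mistaken for unbalanced recursion. The following stand-alone user-space sketch models that pattern; the 16-bit split and all helper names are illustrative assumptions, not the kernel's code.

    #include <assert.h>
    #include <stdio.h>

    /*
     * Hypothetical model of the layout assumed by the new
     * "(--current->lockdep_recursion) & LOCKDEP_RECURSION_MASK" check:
     * recursion depth in the low bits, lockdep_off() nesting above them.
     */
    #define RECURSION_BITS  16
    #define OFF_UNIT        (1U << RECURSION_BITS)  /* one lockdep_off() level   */
    #define RECURSION_MASK  (OFF_UNIT - 1)          /* low bits: recursion depth */

    static unsigned int lockdep_recursion;

    static void off_sketch(void)    { lockdep_recursion += OFF_UNIT; }
    static void on_sketch(void)     { lockdep_recursion -= OFF_UNIT; }
    static void recursion_inc(void) { lockdep_recursion++; }

    static void recursion_finish(void)
    {
            /*
             * Warn only on unbalanced recursion; a pending off-count in
             * the high bits (e.g. added from NMI context) is ignored,
             * where a plain non-zero check would fire spuriously and
             * clobber it by resetting the whole counter.
             */
            if ((--lockdep_recursion) & RECURSION_MASK) {
                    fprintf(stderr, "unbalanced lockdep recursion\n");
                    lockdep_recursion = 0;
            }
    }

    int main(void)
    {
            recursion_inc();        /* enter a lockdep-internal section     */
            off_sketch();           /* NMI turns lockdep off mid-section    */
            recursion_finish();     /* masked check: high bits do not warn  */
            on_sketch();            /* NMI side re-enables lockdep          */
            assert(lockdep_recursion == 0);
            printf("balanced: %#x\n", lockdep_recursion);
            return 0;
    }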
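Likewise, the in_nmi() special cases in lockdep_hardirqs_on()/off() exist because an NMI can land inside local_irq_{en,dis}able() at a point where lockdep's tracked IRQ state and the hardware IRQ flag disagree, so the NMI must save and restore the software state rather than deduce it from hardware. A minimal model of that window, with every name hypothetical and the two "states" reduced to plain booleans:

    #include <assert.h>
    #include <stdbool.h>

    static bool hw_irqs_on;  /* the CPU's actual IRQ flag       */
    static bool sw_irqs_on;  /* lockdep's tracked view of it    */

    static void nmi_model(void)
    {
            bool saved = sw_irqs_on;  /* snapshot software state on entry  */

            sw_irqs_on = false;       /* NMIs run with IRQs off            */
            /* ... NMI body: hw_irqs_on may disagree with saved here ...   */
            sw_irqs_on = saved;       /* restore the snapshot on exit; the
                                       * hardware flag is not trustworthy  */
    }

    static void local_irq_enable_model(void)
    {
            sw_irqs_on = true;   /* tracking is updated first ...           */
            nmi_model();         /* ... NMI lands: sw on, hw still off ...  */
            hw_irqs_on = true;   /* ... then the hardware flag follows      */
    }

    int main(void)
    {
            local_irq_enable_model();
            /* the NMI left the software state consistent with hardware */
            assert(sw_irqs_on && hw_irqs_on);
            return 0;
    }

A normal interrupt could simply consult the hardware flag on exit; the point of the save/restore discipline is that an NMI cannot, precisely because it may interrupt the enable/disable sequence halfway through.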