61 files changed, 183 insertions, 186 deletions
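The changes below mechanically convert tests for CONFIG_PREEMPT into tests for CONFIG_PREEMPTION. CONFIG_PREEMPTION is selected by both CONFIG_PREEMPT and CONFIG_PREEMPT_RT, so it is the switch to use wherever the question is "may kernel code be preempted here?" rather than "which preemption model was built?". A minimal sketch of the distinction, assuming only that Kconfig relationship (the function and its messages are illustrative, not part of this series):

#include <linux/kconfig.h>
#include <linux/printk.h>

static void report_preempt_model(void)
{
	/* PREEMPTION covers both the full and the RT preemption models. */
	if (!IS_ENABLED(CONFIG_PREEMPTION)) {
		pr_info("kernel preemption: off\n");
		return;
	}

	/* Only distinguish the models where the distinction matters. */
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		pr_info("kernel preemption: PREEMPT_RT\n");
	else
		pr_info("kernel preemption: PREEMPT\n");
}

The die() banner hunks in arch/{arm,arm64,s390,xtensa} below apply the same idea in #ifdef form: entry code and locking rules key off CONFIG_PREEMPTION, while the printed model name still distinguishes PREEMPT from PREEMPT_RT.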
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S index 72be01270e24..1f6bb184a44d 100644 --- a/arch/arc/kernel/entry.S +++ b/arch/arc/kernel/entry.S @@ -337,11 +337,11 @@ resume_user_mode_begin: resume_kernel_mode: ; Disable Interrupts from this point on - ; CONFIG_PREEMPT: This is a must for preempt_schedule_irq() - ; !CONFIG_PREEMPT: To ensure restore_regs is intr safe + ; CONFIG_PREEMPTION: This is a must for preempt_schedule_irq() + ; !CONFIG_PREEMPTION: To ensure restore_regs is intr safe IRQ_DISABLE r9 -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION ; Can't preempt if preemption disabled GET_CURR_THR_INFO_FROM_SP r10 diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h index d3e937dcee4d..007d8fea7157 100644 --- a/arch/arm/include/asm/switch_to.h +++ b/arch/arm/include/asm/switch_to.h @@ -10,7 +10,7 @@ * to ensure that the maintenance completes in case we migrate to another * CPU. */ -#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7) +#if defined(CONFIG_PREEMPTION) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7) #define __complete_pending_tlbi() dsb(ish) #else #define __complete_pending_tlbi() diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 858d4e541532..77f54830554c 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -211,7 +211,7 @@ __irq_svc: svc_entry irq_handler -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION ldr r8, [tsk, #TI_PREEMPT] @ get preempt count ldr r0, [tsk, #TI_FLAGS] @ get flags teq r8, #0 @ if preempt count != 0 @@ -226,7 +226,7 @@ ENDPROC(__irq_svc) .ltorg -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION svc_preempt: mov r8, lr 1: bl preempt_schedule_irq @ irq en/disable is done inside diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index c053abd1fb53..abb7dd7e656f 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -248,6 +248,8 @@ void show_stack(struct task_struct *tsk, unsigned long *sp) #ifdef CONFIG_PREEMPT #define S_PREEMPT " PREEMPT" +#elif defined(CONFIG_PREEMPT_RT) +#define S_PREEMPT " PREEMPT_RT" #else #define S_PREEMPT "" #endif diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index 0ee8fc4b4672..dc8f152f3556 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S @@ -135,13 +135,13 @@ flush_levels: and r1, r1, #7 @ mask of the bits for current cache only cmp r1, #2 @ see what cache we have at this level blt skip @ skip if no cache, or just i-cache -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION save_and_disable_irqs_notrace r9 @ make cssr&csidr read atomic #endif mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr isb @ isb to sych the new cssr&csidr mrc p15, 1, r1, c0, c0, 0 @ read the new csidr -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION restore_irqs_notrace r9 #endif and r2, r1, #7 @ extract the length of the cache lines diff --git a/arch/arm/mm/cache-v7m.S b/arch/arm/mm/cache-v7m.S index a0035c426ce6..1bc3a0a50753 100644 --- a/arch/arm/mm/cache-v7m.S +++ b/arch/arm/mm/cache-v7m.S @@ -183,13 +183,13 @@ flush_levels: and r1, r1, #7 @ mask of the bits for current cache only cmp r1, #2 @ see what cache we have at this level blt skip @ skip if no cache, or just i-cache -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION save_and_disable_irqs_notrace r9 @ make cssr&csidr read atomic #endif write_csselr r10, r1 @ set current cache level isb @ isb to sych the new cssr&csidr read_ccsidr r1 @ read the new csidr -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION 
restore_irqs_notrace r9 #endif and r2, r1, #7 @ extract the length of the cache lines diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index b1b4476ddb83..3ab05857ca8f 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -34,32 +34,32 @@ config ARM64 select ARCH_HAS_TEARDOWN_DMA_OPS if IOMMU_SUPPORT select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAVE_NMI_SAFE_CMPXCHG - select ARCH_INLINE_READ_LOCK if !PREEMPT - select ARCH_INLINE_READ_LOCK_BH if !PREEMPT - select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPT - select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPT - select ARCH_INLINE_READ_UNLOCK if !PREEMPT - select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPT - select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPT - select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPT - select ARCH_INLINE_WRITE_LOCK if !PREEMPT - select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPT - select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPT - select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPT - select ARCH_INLINE_WRITE_UNLOCK if !PREEMPT - select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPT - select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPT - select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPT - select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPT - select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPT - select ARCH_INLINE_SPIN_LOCK if !PREEMPT - select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPT - select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPT - select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPT - select ARCH_INLINE_SPIN_UNLOCK if !PREEMPT - select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPT - select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPT - select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPT + select ARCH_INLINE_READ_LOCK if !PREEMPTION + select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION + select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPTION + select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPTION + select ARCH_INLINE_READ_UNLOCK if !PREEMPTION + select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPTION + select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPTION + select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPTION + select ARCH_INLINE_WRITE_LOCK if !PREEMPTION + select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPTION + select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPTION + select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPTION + select ARCH_INLINE_WRITE_UNLOCK if !PREEMPTION + select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPTION + select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPTION + select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPTION + select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPTION + select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPTION + select ARCH_INLINE_SPIN_LOCK if !PREEMPTION + select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPTION + select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPTION + select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPTION + select ARCH_INLINE_SPIN_UNLOCK if !PREEMPTION + select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPTION + select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPTION + select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION select ARCH_KEEP_MEMBLOCK select ARCH_USE_CMPXCHG_LOCKREF select ARCH_USE_QUEUED_RWLOCKS diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c index e273faca924f..999da59f03a9 100644 --- a/arch/arm64/crypto/sha256-glue.c +++ b/arch/arm64/crypto/sha256-glue.c @@ -97,7 +97,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data, * input when running on a preemptible kernel, but process the * data block by block instead. 
*/ - if (IS_ENABLED(CONFIG_PREEMPT) && + if (IS_ENABLED(CONFIG_PREEMPTION) && chunk + sctx->count % SHA256_BLOCK_SIZE > SHA256_BLOCK_SIZE) chunk = SHA256_BLOCK_SIZE - sctx->count % SHA256_BLOCK_SIZE; diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index b8cf7c85ffa2..2cc0dd8bd9f7 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -699,8 +699,8 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU * where <label> is optional, and marks the point where execution will resume * after a yield has been performed. If omitted, execution resumes right after * the endif_yield_neon invocation. Note that the entire sequence, including - * the provided patchup code, will be omitted from the image if CONFIG_PREEMPT - * is not defined. + * the provided patchup code, will be omitted from the image if + * CONFIG_PREEMPTION is not defined. * * As a convenience, in the case where no patchup code is required, the above * sequence may be abbreviated to @@ -728,7 +728,7 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU .endm .macro if_will_cond_yield_neon -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION get_current_task x0 ldr x0, [x0, #TSK_TI_PREEMPT] sub x0, x0, #PREEMPT_DISABLE_OFFSET diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h index d49951647014..80e946b2abee 100644 --- a/arch/arm64/include/asm/preempt.h +++ b/arch/arm64/include/asm/preempt.h @@ -79,11 +79,11 @@ static inline bool should_resched(int preempt_offset) return pc == preempt_offset; } -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION void preempt_schedule(void); #define __preempt_schedule() preempt_schedule() void preempt_schedule_notrace(void); #define __preempt_schedule_notrace() preempt_schedule_notrace() -#endif /* CONFIG_PREEMPT */ +#endif /* CONFIG_PREEMPTION */ #endif /* __ASM_PREEMPT_H */ diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 7c6a0a41676f..099497b4bc09 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -605,7 +605,7 @@ el1_irq: irq_handler -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count alternative_if ARM64_HAS_IRQ_PRIO_MASKING /* diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 73caf35c2262..cf402be5c573 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -144,9 +144,12 @@ void show_stack(struct task_struct *tsk, unsigned long *sp) #ifdef CONFIG_PREEMPT #define S_PREEMPT " PREEMPT" +#elif defined(CONFIG_PREEMPT_RT) +#define S_PREEMPT " PREEMPT_RT" #else #define S_PREEMPT "" #endif + #define S_SMP " SMP" static int __die(const char *str, int err, struct pt_regs *regs) diff --git a/arch/c6x/kernel/entry.S b/arch/c6x/kernel/entry.S index 4332a10aec6c..fb154d19625b 100644 --- a/arch/c6x/kernel/entry.S +++ b/arch/c6x/kernel/entry.S @@ -18,7 +18,7 @@ #define DP B14 #define SP B15 -#ifndef CONFIG_PREEMPT +#ifndef CONFIG_PREEMPTION #define resume_kernel restore_all #endif @@ -287,7 +287,7 @@ work_notifysig: ;; is a little bit different ;; ENTRY(ret_from_exception) -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION MASK_INT B2 #endif @@ -557,7 +557,7 @@ ENDPROC(_nmi_handler) ;; ;; Jump to schedule() then return to ret_from_isr ;; -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION resume_kernel: GET_THREAD_INFO A12 LDW .D1T1 *+A12(THREAD_INFO_PREEMPT_COUNT),A1 @@ -582,7 +582,7 @@ preempt_schedule: B .S2 preempt_schedule_irq #endif ADDKPC .S2 preempt_schedule,B3,4 -#endif /* 
CONFIG_PREEMPT */ +#endif /* CONFIG_PREEMPTION */ ENTRY(enable_exception) DINT diff --git a/arch/csky/kernel/entry.S b/arch/csky/kernel/entry.S index a7a5b67df898..007706328000 100644 --- a/arch/csky/kernel/entry.S +++ b/arch/csky/kernel/entry.S @@ -277,7 +277,7 @@ ENTRY(csky_irq) zero_fp psrset ee -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION mov r9, sp /* Get current stack pointer */ bmaski r10, THREAD_SHIFT andn r9, r10 /* Get thread_info */ @@ -294,7 +294,7 @@ ENTRY(csky_irq) mov a0, sp jbsr csky_do_IRQ -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION subi r12, 1 stw r12, (r9, TINFO_PREEMPT) cmpnei r12, 0 diff --git a/arch/h8300/kernel/entry.S b/arch/h8300/kernel/entry.S index 4ade5f8299ba..c6e289b5f1f2 100644 --- a/arch/h8300/kernel/entry.S +++ b/arch/h8300/kernel/entry.S @@ -284,12 +284,12 @@ badsys: mov.l er0,@(LER0:16,sp) bra resume_userspace -#if !defined(CONFIG_PREEMPT) +#if !defined(CONFIG_PREEMPTION) #define resume_kernel restore_all #endif ret_from_exception: -#if defined(CONFIG_PREEMPT) +#if defined(CONFIG_PREEMPTION) orc #0xc0,ccr #endif ret_from_interrupt: @@ -319,7 +319,7 @@ work_resched: restore_all: RESTORE_ALL /* Does RTE */ -#if defined(CONFIG_PREEMPT) +#if defined(CONFIG_PREEMPTION) resume_kernel: mov.l @(TI_PRE_COUNT:16,er4),er0 bne restore_all:8 diff --git a/arch/hexagon/kernel/vm_entry.S b/arch/hexagon/kernel/vm_entry.S index 12242c27e2df..65a1ea0eed2f 100644 --- a/arch/hexagon/kernel/vm_entry.S +++ b/arch/hexagon/kernel/vm_entry.S @@ -265,12 +265,12 @@ event_dispatch: * should be in the designated register (usually R19) * * If we were in kernel mode, we don't need to check scheduler - * or signals if CONFIG_PREEMPT is not set. If set, then it has + * or signals if CONFIG_PREEMPTION is not set. If set, then it has * to jump to a need_resched kind of block. - * BTW, CONFIG_PREEMPT is not supported yet. + * BTW, CONFIG_PREEMPTION is not supported yet. */ -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION R0 = #VM_INT_DISABLE trap1(#HVM_TRAP1_VMSETIE) #endif diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index a9992be5718b..2ac926331500 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -670,12 +670,12 @@ GLOBAL_ENTRY(ia64_leave_syscall) * * p6 controls whether current_thread_info()->flags needs to be check for * extra work. We always check for extra work when returning to user-level. - * With CONFIG_PREEMPT, we also check for extra work when the preempt_count + * With CONFIG_PREEMPTION, we also check for extra work when the preempt_count * is 0. After extra work processing has been completed, execution * resumes at ia64_work_processed_syscall with p6 set to 1 if the extra-work-check * needs to be redone. */ -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION RSM_PSR_I(p0, r2, r18) // disable interrupts cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall (pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13 @@ -685,7 +685,7 @@ GLOBAL_ENTRY(ia64_leave_syscall) (pUStk) mov r21=0 // r21 <- 0 ;; cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0) -#else /* !CONFIG_PREEMPT */ +#else /* !CONFIG_PREEMPTION */ RSM_PSR_I(pUStk, r2, r18) cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall (pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk @@ -814,12 +814,12 @@ GLOBAL_ENTRY(ia64_leave_kernel) * * p6 controls whether current_thread_info()->flags needs to be check for * extra work. We always check for extra work when returning to user-level. 
- * With CONFIG_PREEMPT, we also check for extra work when the preempt_count + * With CONFIG_PREEMPTION, we also check for extra work when the preempt_count * is 0. After extra work processing has been completed, execution * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check * needs to be redone. */ -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION RSM_PSR_I(p0, r17, r31) // disable interrupts cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel (pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13 @@ -1120,7 +1120,7 @@ skip_rbs_switch: /* * On entry: - * r20 = &current->thread_info->pre_count (if CONFIG_PREEMPT) + * r20 = &current->thread_info->pre_count (if CONFIG_PREEMPTION) * r31 = current->thread_info->flags * On exit: * p6 = TRUE if work-pending-check needs to be redone diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index b8356edbde65..a6d6a0556f08 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c @@ -841,7 +841,7 @@ static int __kprobes pre_kprobes_handler(struct die_args *args) return 1; } -#if !defined(CONFIG_PREEMPT) +#if !defined(CONFIG_PREEMPTION) if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) { /* Boost up -- we can execute copied instructions directly */ ia64_psr(regs)->ri = p->ainsn.slot; diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S index de7083bd1d24..f6ded356394a 100644 --- a/arch/microblaze/kernel/entry.S +++ b/arch/microblaze/kernel/entry.S @@ -728,7 +728,7 @@ no_intr_resched: bri 6f; /* MS: Return to kernel state. */ 2: -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get preempt_count from thread info */ lwi r5, r11, TI_PREEMPT_COUNT; diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h index feb069cbf44e..655f40ddb6d1 100644 --- a/arch/mips/include/asm/asmmacro.h +++ b/arch/mips/include/asm/asmmacro.h @@ -63,7 +63,7 @@ .endm .macro local_irq_disable reg=t0 -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION lw \reg, TI_PRE_COUNT($28) addi \reg, \reg, 1 sw \reg, TI_PRE_COUNT($28) @@ -73,7 +73,7 @@ xori \reg, \reg, 1 mtc0 \reg, CP0_STATUS irq_disable_hazard -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION lw \reg, TI_PRE_COUNT($28) addi \reg, \reg, -1 sw \reg, TI_PRE_COUNT($28) diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index 5469d43b6966..4849a48afc0f 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S @@ -19,7 +19,7 @@ #include <asm/thread_info.h> #include <asm/war.h> -#ifndef CONFIG_PREEMPT +#ifndef CONFIG_PREEMPTION #define resume_kernel restore_all #else #define __ret_from_irq ret_from_exception @@ -27,7 +27,7 @@ .text .align 5 -#ifndef CONFIG_PREEMPT +#ifndef CONFIG_PREEMPTION FEXPORT(ret_from_exception) local_irq_disable # preempt stop b __ret_from_irq @@ -53,7 +53,7 @@ resume_userspace: bnez t0, work_pending j restore_all -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION resume_kernel: local_irq_disable lw t0, TI_PRE_COUNT($28) diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig index 12c06a833b7c..e30298e99e1b 100644 --- a/arch/nds32/Kconfig +++ b/arch/nds32/Kconfig @@ -62,7 +62,7 @@ config GENERIC_HWEIGHT config GENERIC_LOCKBREAK def_bool y - depends on PREEMPT + depends on PREEMPTION config TRACE_IRQFLAGS_SUPPORT def_bool y diff --git a/arch/nds32/kernel/ex-exit.S b/arch/nds32/kernel/ex-exit.S index 1df02a793364..6a2966c2d8c8 100644 --- a/arch/nds32/kernel/ex-exit.S +++ b/arch/nds32/kernel/ex-exit.S @@ -72,7 +72,7 @@ restore_user_regs_last .endm
-#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION .macro preempt_stop .endm #else @@ -158,7 +158,7 @@ no_work_pending: /* * preemptive kernel */ -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION resume_kernel: gie_disable lwi $t0, [tsk+#TSK_TI_PREEMPT] diff --git a/arch/nios2/kernel/entry.S b/arch/nios2/kernel/entry.S index 1e515ccd698e..3d8d1d0bcb64 100644 --- a/arch/nios2/kernel/entry.S +++ b/arch/nios2/kernel/entry.S @@ -365,7 +365,7 @@ ENTRY(ret_from_interrupt) ldw r1, PT_ESTATUS(sp) /* check if returning to kernel */ TSTBNZ r1, r1, ESTATUS_EU, Luser_return -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION GET_THREAD_INFO r1 ldw r4, TI_PREEMPT_COUNT(r1) bne r4, r0, restore_all diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index b16237c95ea3..593e4014cef8 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -81,7 +81,7 @@ config STACK_GROWSUP config GENERIC_LOCKBREAK bool default y - depends on SMP && PREEMPT + depends on SMP && PREEMPTION config ARCH_HAS_ILOG2_U32 bool diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index b96d74496977..9a03e29c8733 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -940,14 +940,14 @@ intr_restore: rfi nop -#ifndef CONFIG_PREEMPT +#ifndef CONFIG_PREEMPTION # define intr_do_preempt intr_restore -#endif /* !CONFIG_PREEMPT */ +#endif /* !CONFIG_PREEMPTION */ .import schedule,code intr_do_resched: /* Only call schedule on return to userspace. If we're returning - * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise + * to kernel space, we may schedule if CONFIG_PREEMPTION, otherwise * we jump back to intr_restore. */ LDREG PT_IASQ0(%r16), %r20 @@ -979,7 +979,7 @@ intr_do_resched: * and preempt_count is 0. otherwise, we continue on * our merry way back to the current running task. */ -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION .import preempt_schedule_irq,code intr_do_preempt: rsm PSW_SM_I, %r0 /* disable interrupts */ @@ -999,7 +999,7 @@ intr_do_preempt: nop b,n intr_restore /* ssm PSW_SM_I done by intr_restore */ -#endif /* CONFIG_PREEMPT */ +#endif /* CONFIG_PREEMPTION */ /* * External interrupts. diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 1ec34e16ed65..6987b0832e5f 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -106,7 +106,7 @@ config LOCKDEP_SUPPORT config GENERIC_LOCKBREAK bool default y - depends on SMP && PREEMPT + depends on SMP && PREEMPTION config GENERIC_HWEIGHT bool diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index d60908ea37fb..e1a4c39b83b8 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -897,7 +897,7 @@ resume_kernel: bne- 0b 1: -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION /* check current_thread_info->preempt_count */ lwz r0,TI_PREEMPT(r2) cmpwi 0,r0,0 /* if non-zero, just restore regs and return */ @@ -921,7 +921,7 @@ resume_kernel: */ bl trace_hardirqs_on #endif -#endif /* CONFIG_PREEMPT */ +#endif /* CONFIG_PREEMPTION */ restore_kuap: kuap_restore r1, r2, r9, r10, r0 diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 3fd3ef352e3f..a9a1d3cdb523 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -846,7 +846,7 @@ resume_kernel: bne- 0b 1: -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION /* Check if we need to preempt */ andi. 
r0,r4,_TIF_NEED_RESCHED beq+ restore @@ -877,7 +877,7 @@ resume_kernel: li r10,MSR_RI mtmsrd r10,1 /* Update machine state */ #endif /* CONFIG_PPC_BOOK3E */ -#endif /* CONFIG_PREEMPT */ +#endif /* CONFIG_PREEMPTION */ .globl fast_exc_return_irq fast_exc_return_irq: diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index a1349ca64669..dd61bb247ce7 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -155,7 +155,7 @@ _save_context: REG_L x2, PT_SP(sp) .endm -#if !IS_ENABLED(CONFIG_PREEMPT) +#if !IS_ENABLED(CONFIG_PREEMPTION) .set resume_kernel, restore_all #endif @@ -304,7 +304,7 @@ restore_all: sret #endif -#if IS_ENABLED(CONFIG_PREEMPT) +#if IS_ENABLED(CONFIG_PREEMPTION) resume_kernel: REG_L s0, TASK_TI_PREEMPT_COUNT(tp) bnez s0, restore_all diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index bc88841d335d..6d67e9cc8888 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -30,7 +30,7 @@ config GENERIC_BUG_RELATIVE_POINTERS def_bool y config GENERIC_LOCKBREAK - def_bool y if PREEMPT + def_bool y if PREEMPTION config PGSTE def_bool y if KVM diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h index b5ea9e14c017..6ede29907fbf 100644 --- a/arch/s390/include/asm/preempt.h +++ b/arch/s390/include/asm/preempt.h @@ -130,11 +130,11 @@ static inline bool should_resched(int preempt_offset) #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */ -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION extern asmlinkage void preempt_schedule(void); #define __preempt_schedule() preempt_schedule() extern asmlinkage void preempt_schedule_notrace(void); #define __preempt_schedule_notrace() preempt_schedule_notrace() -#endif /* CONFIG_PREEMPT */ +#endif /* CONFIG_PREEMPTION */ #endif /* __ASM_PREEMPT_H */ diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c index d306fe04489a..2c122d8bab93 100644 --- a/arch/s390/kernel/dumpstack.c +++ b/arch/s390/kernel/dumpstack.c @@ -195,6 +195,8 @@ void die(struct pt_regs *regs, const char *str) regs->int_code >> 17, ++die_counter); #ifdef CONFIG_PREEMPT pr_cont("PREEMPT "); +#elif defined(CONFIG_PREEMPT_RT) + pr_cont("PREEMPT_RT "); #endif pr_cont("SMP "); if (debug_pagealloc_enabled()) diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 270d1d145761..9205add8481d 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -790,7 +790,7 @@ ENTRY(io_int_handler) .Lio_work: tm __PT_PSW+1(%r11),0x01 # returning to user ?
jo .Lio_work_user # yes -> do resched & signal -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION # check for preemptive scheduling icm %r0,15,__LC_PREEMPT_COUNT jnz .Lio_restore # preemption is disabled diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index f356ee674d89..9ece111b0254 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -108,7 +108,7 @@ config GENERIC_CALIBRATE_DELAY config GENERIC_LOCKBREAK def_bool y - depends on SMP && PREEMPT + depends on SMP && PREEMPTION config ARCH_SUSPEND_POSSIBLE def_bool n diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S index de68ffdfffbf..81c8b64b977f 100644 --- a/arch/sh/kernel/cpu/sh5/entry.S +++ b/arch/sh/kernel/cpu/sh5/entry.S @@ -86,7 +86,7 @@ andi r6, ~0xf0, r6; \ putcon r6, SR; -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION # define preempt_stop() CLI() #else # define preempt_stop() @@ -884,7 +884,7 @@ ret_from_exception: /* Check softirqs */ -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION pta ret_from_syscall, tr0 blink tr0, ZERO diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index d31f66e82ce5..956a7a03b0c8 100644 --- a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S @@ -41,7 +41,7 @@ */ #include <asm/dwarf.h> -#if defined(CONFIG_PREEMPT) +#if defined(CONFIG_PREEMPTION) # define preempt_stop() cli ; TRACE_IRQS_OFF #else # define preempt_stop() @@ -84,7 +84,7 @@ ENTRY(ret_from_irq) get_current_thread_info r8, r0 bt resume_kernel ! Yes, it's from kernel, go back soon -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION bra resume_userspace nop ENTRY(resume_kernel) diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index eb24cb1afc11..e8c3ea01c12f 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -277,7 +277,7 @@ config US3_MC config GENERIC_LOCKBREAK bool default y - depends on SPARC64 && SMP && PREEMPT + depends on SPARC64 && SMP && PREEMPTION config NUMA bool "NUMA support" diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S index 29aa34f11720..c5fd4b450d9b 100644 --- a/arch/sparc/kernel/rtrap_64.S +++ b/arch/sparc/kernel/rtrap_64.S @@ -310,7 +310,7 @@ kern_rtt_restore: retry to_kernel: -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION ldsw [%g6 + TI_PRE_COUNT], %l5 brnz %l5, kern_fpucheck ldx [%g6 + TI_FLAGS], %l5 diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index be897803834a..2c9e48566e48 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -520,7 +520,7 @@ common_exception_return: call4 schedule # void schedule (void) j 1b -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION 6: _bbci.l a4, TIF_NEED_RESCHED, 4f diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index 87bd68dd7687..0976e27b8d5d 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -519,12 +519,15 @@ DEFINE_SPINLOCK(die_lock); void die(const char * str, struct pt_regs * regs, long err) { static int die_counter; + const char *pr = ""; + + if (IS_ENABLED(CONFIG_PREEMPTION)) + pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT"; console_verbose(); spin_lock_irq(&die_lock); - pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter, - IS_ENABLED(CONFIG_PREEMPT) ? 
" PREEMPT" : ""); + pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter, pr); show_regs(regs); if (!user_mode(regs)) show_stack(NULL, (unsigned long*)regs->areg[1]); diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c index 8b9919c26095..70650b248de5 100644 --- a/drivers/xen/preempt.c +++ b/drivers/xen/preempt.c @@ -8,7 +8,7 @@ #include <linux/sched.h> #include <xen/xen-ops.h> -#ifndef CONFIG_PREEMPT +#ifndef CONFIG_PREEMPTION /* * Some hypercalls issued by the toolstack can take many 10s of @@ -37,4 +37,4 @@ asmlinkage __visible void xen_maybe_preempt_hcall(void) __this_cpu_write(xen_in_preemptible_hcall, true); } } -#endif /* CONFIG_PREEMPT */ +#endif /* CONFIG_PREEMPTION */ diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index fc1b564b9cfe..0ee5386926fa 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -168,7 +168,7 @@ btrfs_device_set_##name(struct btrfs_device *dev, u64 size) \ write_seqcount_end(&dev->data_seqcount); \ preempt_enable(); \ } -#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT) +#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION) #define BTRFS_DEVICE_GETSET_FUNCS(name) \ static inline u64 \ btrfs_device_get_##name(const struct btrfs_device *dev) \ diff --git a/fs/stack.c b/fs/stack.c index 4ef2c056269d..c9830924eb12 100644 --- a/fs/stack.c +++ b/fs/stack.c @@ -23,7 +23,7 @@ void fsstack_copy_inode_size(struct inode *dst, struct inode *src) /* * But on 32-bit, we ought to make an effort to keep the two halves of - * i_blocks in sync despite SMP or PREEMPT - though stat's + * i_blocks in sync despite SMP or PREEMPTION - though stat's * generic_fillattr() doesn't bother, and we won't be applying quotas * (where i_blocks does become important) at the upper level. * @@ -38,14 +38,14 @@ void fsstack_copy_inode_size(struct inode *dst, struct inode *src) spin_unlock(&src->i_lock); /* - * If CONFIG_SMP or CONFIG_PREEMPT on 32-bit, it's vital for + * If CONFIG_SMP or CONFIG_PREEMPTION on 32-bit, it's vital for * fsstack_copy_inode_size() to hold some lock around * i_size_write(), otherwise i_size_read() may spin forever (see * include/linux/fs.h). We don't necessarily hold i_mutex when this * is called, so take i_lock for that case. * * And if on 32-bit, continue our effort to keep the two halves of - * i_blocks in sync despite SMP or PREEMPT: use i_lock for that case + * i_blocks in sync despite SMP or PREEMPTION: use i_lock for that case * too, and do both at once by combining the tests. * * There is none of this locking overhead in the 64-bit case. 
diff --git a/include/linux/fs.h b/include/linux/fs.h index 98e0349adb52..dddfcbb140a7 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -855,7 +855,7 @@ static inline loff_t i_size_read(const struct inode *inode) i_size = inode->i_size; } while (read_seqcount_retry(&inode->i_size_seqcount, seq)); return i_size; -#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT) +#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION) loff_t i_size; preempt_disable(); @@ -880,7 +880,7 @@ static inline void i_size_write(struct inode *inode, loff_t i_size) inode->i_size = i_size; write_seqcount_end(&inode->i_size_seqcount); preempt_enable(); -#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT) +#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION) preempt_disable(); inode->i_size = i_size; preempt_enable(); diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 8bb63027e4d6..a927829bb73a 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -718,7 +718,7 @@ static inline void hd_free_part(struct hd_struct *part) * accessor function. * * Code written along the lines of i_size_read() and i_size_write(). - * CONFIG_PREEMPT case optimizes the case of UP kernel with preemption + * CONFIG_PREEMPTION case optimizes the case of UP kernel with preemption * on. */ static inline sector_t part_nr_sects_read(struct hd_struct *part) @@ -731,7 +731,7 @@ static inline sector_t part_nr_sects_read(struct hd_struct *part) nr_sects = part->nr_sects; } while (read_seqcount_retry(&part->nr_sects_seq, seq)); return nr_sects; -#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT) +#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION) sector_t nr_sects; preempt_disable(); @@ -754,7 +754,7 @@ static inline void part_nr_sects_write(struct hd_struct *part, sector_t size) write_seqcount_begin(&part->nr_sects_seq); part->nr_sects = size; write_seqcount_end(&part->nr_sects_seq); -#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT) +#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION) preempt_disable(); part->nr_sects = size; preempt_enable(); diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h index f9a0c6189852..648298f877da 100644 --- a/include/linux/stop_machine.h +++ b/include/linux/stop_machine.h @@ -33,7 +33,6 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void * bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg, struct cpu_stop_work *work_buf); int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg); -int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg); void stop_machine_park(int cpu); void stop_machine_unpark(int cpu); void stop_machine_yield(const struct cpumask *cpumask); @@ -90,12 +89,6 @@ static inline int stop_cpus(const struct cpumask *cpumask, return -ENOENT; } -static inline int try_stop_cpus(const struct cpumask *cpumask, - cpu_stop_fn_t fn, void *arg) -{ - return stop_cpus(cpumask, fn, arg); -} - #endif /* CONFIG_SMP */ /* diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h index d89969aa9942..095be1d66f31 100644 --- a/include/xen/xen-ops.h +++ b/include/xen/xen-ops.h @@ -215,7 +215,7 @@ bool xen_running_on_version_or_later(unsigned int major, unsigned int minor); void xen_efi_runtime_setup(void); -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION static inline void xen_preemptible_hcall_begin(void) { @@ -239,6 +239,6 @@ static inline void xen_preemptible_hcall_end(void) __this_cpu_write(xen_in_preemptible_hcall, false); } -#endif /* CONFIG_PREEMPT */ +#endif /* 
CONFIG_PREEMPTION */ #endif /* INCLUDE_XEN_OPS_H */ diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks index e0852dc333ac..3de8fd11873b 100644 --- a/kernel/Kconfig.locks +++ b/kernel/Kconfig.locks @@ -101,7 +101,7 @@ config UNINLINE_SPIN_UNLOCK # unlock and unlock_irq functions are inlined when: # - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y # or -# - DEBUG_SPINLOCK=n and PREEMPT=n +# - DEBUG_SPINLOCK=n and PREEMPTION=n # # unlock_bh and unlock_irqrestore functions are inlined when: # - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y @@ -139,7 +139,7 @@ config INLINE_SPIN_UNLOCK_BH config INLINE_SPIN_UNLOCK_IRQ def_bool y - depends on !PREEMPT || ARCH_INLINE_SPIN_UNLOCK_IRQ + depends on !PREEMPTION || ARCH_INLINE_SPIN_UNLOCK_IRQ config INLINE_SPIN_UNLOCK_IRQRESTORE def_bool y @@ -168,7 +168,7 @@ config INLINE_READ_LOCK_IRQSAVE config INLINE_READ_UNLOCK def_bool y - depends on !PREEMPT || ARCH_INLINE_READ_UNLOCK + depends on !PREEMPTION || ARCH_INLINE_READ_UNLOCK config INLINE_READ_UNLOCK_BH def_bool y @@ -176,7 +176,7 @@ config INLINE_READ_UNLOCK_BH config INLINE_READ_UNLOCK_IRQ def_bool y - depends on !PREEMPT || ARCH_INLINE_READ_UNLOCK_IRQ + depends on !PREEMPTION || ARCH_INLINE_READ_UNLOCK_IRQ config INLINE_READ_UNLOCK_IRQRESTORE def_bool y @@ -205,7 +205,7 @@ config INLINE_WRITE_LOCK_IRQSAVE config INLINE_WRITE_UNLOCK def_bool y - depends on !PREEMPT || ARCH_INLINE_WRITE_UNLOCK + depends on !PREEMPTION || ARCH_INLINE_WRITE_UNLOCK config INLINE_WRITE_UNLOCK_BH def_bool y @@ -213,7 +213,7 @@ config INLINE_WRITE_UNLOCK_BH config INLINE_WRITE_UNLOCK_IRQ def_bool y - depends on !PREEMPT || ARCH_INLINE_WRITE_UNLOCK_IRQ + depends on !PREEMPTION || ARCH_INLINE_WRITE_UNLOCK_IRQ config INLINE_WRITE_UNLOCK_IRQRESTORE def_bool y diff --git a/kernel/cpu.c b/kernel/cpu.c index a59cc980adad..e7f79674824d 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -525,8 +525,7 @@ static int bringup_wait_for_ap(unsigned int cpu) if (WARN_ON_ONCE((!cpu_online(cpu)))) return -ECANCELED; - /* Unpark the stopper thread and the hotplug thread of the target cpu */ - stop_machine_unpark(cpu); + /* Unpark the hotplug thread of the target cpu */ kthread_unpark(st->thread); /* @@ -1089,8 +1088,8 @@ void notify_cpu_starting(unsigned int cpu) /* * Called from the idle task. Wake up the controlling task which brings the - * stopper and the hotplug thread of the upcoming CPU up and then delegates - * the rest of the online bringup to the hotplug thread. + * hotplug thread of the upcoming CPU up and then delegates the rest of the + * online bringup to the hotplug thread. */ void cpuhp_online_idle(enum cpuhp_state state) { @@ -1100,6 +1099,12 @@ void cpuhp_online_idle(enum cpuhp_state state) if (state != CPUHP_AP_ONLINE_IDLE) return; + /* + * Unpark the stopper thread before we start the idle loop (and start + * scheduling); this ensures the stopper task is always available.
+ */ + stop_machine_unpark(smp_processor_id()); + st->state = CPUHP_AP_ONLINE_IDLE; complete_ap_thread(st, true); } diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c index 1152259a4ca0..12bca64dff73 100644 --- a/kernel/sched/clock.c +++ b/kernel/sched/clock.c @@ -370,7 +370,7 @@ u64 sched_clock_cpu(int cpu) if (sched_clock_stable()) return sched_clock() + __sched_clock_offset; - if (!static_branch_unlikely(&sched_clock_running)) + if (!static_branch_likely(&sched_clock_running)) return sched_clock(); preempt_disable_notrace(); @@ -393,7 +393,7 @@ void sched_clock_tick(void) if (sched_clock_stable()) return; - if (!static_branch_unlikely(&sched_clock_running)) + if (!static_branch_likely(&sched_clock_running)) return; lockdep_assert_irqs_disabled(); @@ -460,7 +460,7 @@ void __init sched_clock_init(void) u64 sched_clock_cpu(int cpu) { - if (!static_branch_unlikely(&sched_clock_running)) + if (!static_branch_likely(&sched_clock_running)) return 0; return sched_clock(); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 90e4b00ace89..15508c202bf5 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4540,17 +4540,17 @@ void set_user_nice(struct task_struct *p, long nice) p->prio = effective_prio(p); delta = p->prio - old_prio; - if (queued) { + if (queued) enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); - /* - * If the task increased its priority or is running and - * lowered its priority, then reschedule its CPU: - */ - if (delta < 0 || (delta > 0 && task_running(rq, p))) - resched_curr(rq); - } if (running) set_next_task(rq, p); + + /* + * If the task increased its priority or is running and + * lowered its priority, then reschedule its CPU: + */ + p->sched_class->prio_changed(rq, p, old_prio); + out_unlock: task_rq_unlock(rq, p, &rf); } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index ba749f579714..8da0222924cf 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5828,6 +5828,7 @@ static inline int select_idle_smt(struct task_struct *p, int target) */ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target) { + struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask); struct sched_domain *this_sd; u64 avg_cost, avg_idle; u64 time, cost; @@ -5859,11 +5860,11 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t time = cpu_clock(this); - for_each_cpu_wrap(cpu, sched_domain_span(sd), target) { + cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); + + for_each_cpu_wrap(cpu, cpus, target) { if (!--nr) return si_cpu; - if (!cpumask_test_cpu(cpu, p->cpus_ptr)) - continue; if (available_idle_cpu(cpu)) break; if (si_cpu == -1 && sched_idle_cpu(cpu)) @@ -10333,6 +10334,9 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) if (!task_on_rq_queued(p)) return; + if (rq->cfs.nr_running == 1) + return; + /* * Reschedule if we are currently running on this runqueue and * our priority decreased, or if we are not currently running on diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c index a96db50d40e0..bd006b79b360 100644 --- a/kernel/sched/pelt.c +++ b/kernel/sched/pelt.c @@ -129,8 +129,20 @@ accumulate_sum(u64 delta, struct sched_avg *sa, * Step 2 */ delta %= 1024; - contrib = __accumulate_pelt_segments(periods, - 1024 - sa->period_contrib, delta); + if (load) { + /* + * This relies on the: + * + * if (!load) + * runnable = running = 0; + * + * clause from ___update_load_sum(); this results in + * the below usage of @contrib to disappear entirely, + * so no
point in calculating it. + */ + contrib = __accumulate_pelt_segments(periods, + 1024 - sa->period_contrib, delta); + } } sa->period_contrib = delta; @@ -205,7 +217,9 @@ ___update_load_sum(u64 now, struct sched_avg *sa, * This means that weight will be 0 but not running for a sched_entity * but also for a cfs_rq if the latter becomes idle. As an example, * this happens during idle_balance() which calls - * update_blocked_averages() + * update_blocked_averages(). + * + * Also see the comment in accumulate_sum(). */ if (!load) runnable = running = 0; diff --git a/kernel/sched/wait_bit.c b/kernel/sched/wait_bit.c index 45eba18a2898..02ce292b9bc0 100644 --- a/kernel/sched/wait_bit.c +++ b/kernel/sched/wait_bit.c @@ -179,6 +179,7 @@ void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry, void *var, int .bit_nr = -1, }, .wq_entry = { + .flags = flags, .private = current, .func = var_wake_function, .entry = LIST_HEAD_INIT(wbq_entry->wq_entry.entry), diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index 1fe34a9fabc2..5d68ec4c4015 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -453,36 +453,6 @@ int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg) return ret; } -/** - * try_stop_cpus - try to stop multiple cpus - * @cpumask: cpus to stop - * @fn: function to execute - * @arg: argument to @fn - * - * Identical to stop_cpus() except that it fails with -EAGAIN if - * someone else is already using the facility. - * - * CONTEXT: - * Might sleep. - * - * RETURNS: - * -EAGAIN if someone else is already stopping cpus, -ENOENT if - * @fn(@arg) was not executed at all because all cpus in @cpumask were - * offline; otherwise, 0 if all executions of @fn returned 0, any non - * zero return value if any returned non zero. 
- */ -int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg) -{ - int ret; - - /* static works are used, process one request at a time */ - if (!mutex_trylock(&stop_cpus_mutex)) - return -EAGAIN; - ret = __stop_cpus(cpumask, fn, arg); - mutex_unlock(&stop_cpus_mutex); - return ret; -} - static int cpu_stop_should_run(unsigned int cpu) { struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index cfc923558e04..4114412649b8 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2280,7 +2280,7 @@ __acquires(&pool->lock) } /* - * The following prevents a kworker from hogging CPU on !PREEMPT + * The following prevents a kworker from hogging CPU on !PREEMPTION * kernels, where a requeueing work item waiting for something to * happen could deadlock with stop_machine as such work item could * indefinitely requeue itself while all other CPUs are trapped in diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 5ffe144c9794..6859f523517b 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1025,7 +1025,7 @@ config DEBUG_TIMEKEEPING config DEBUG_PREEMPT bool "Debug preemptible kernel" - depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT + depends on DEBUG_KERNEL && PREEMPTION && TRACE_IRQFLAGS_SUPPORT default y help If you say Y here then the kernel will use a debug variant of the diff --git a/mm/memory.c b/mm/memory.c index 45442d9a4f52..1c4be871a237 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2203,7 +2203,7 @@ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd, pte_t *page_table, pte_t orig_pte) { int same = 1; -#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT) +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION) if (sizeof(pte_t) > sizeof(unsigned long)) { spinlock_t *ptl = pte_lockptr(mm, pmd); spin_lock(ptl); diff --git a/mm/slub.c b/mm/slub.c index d11389710b12..f7c66dc2ba0b 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1964,7 +1964,7 @@ static void *get_partial(struct kmem_cache *s, gfp_t flags, int node, return get_any_partial(s, flags, c); } -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION /* * Calculate the next globally unique transaction for disambiguiation * during cmpxchg. The transactions start with the cpu number and are then @@ -2009,7 +2009,7 @@ static inline void note_cmpxchg_failure(const char *n, pr_info("%s %s: cmpxchg redo ", n, s->name); -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION if (tid_to_cpu(tid) != tid_to_cpu(actual_tid)) pr_warn("due to cpu change %d -> %d\n", tid_to_cpu(tid), tid_to_cpu(actual_tid)); @@ -2637,7 +2637,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, unsigned long flags; local_irq_save(flags); -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION /* * We may have been preempted and rescheduled on a different * cpu before disabling interrupts. Need to reload cpu area @@ -2691,13 +2691,13 @@ redo: * as we end up on the original cpu again when doing the cmpxchg. * * We should guarantee that tid and kmem_cache are retrieved on - * the same cpu. It could be different if CONFIG_PREEMPT so we need + * the same cpu. It could be different if CONFIG_PREEMPTION so we need * to check if it is matched or not. 
*/ do { tid = this_cpu_read(s->cpu_slab->tid); c = raw_cpu_ptr(s->cpu_slab); - } while (IS_ENABLED(CONFIG_PREEMPT) && + } while (IS_ENABLED(CONFIG_PREEMPTION) && unlikely(tid != READ_ONCE(c->tid))); /* @@ -2971,7 +2971,7 @@ redo: do { tid = this_cpu_read(s->cpu_slab->tid); c = raw_cpu_ptr(s->cpu_slab); - } while (IS_ENABLED(CONFIG_PREEMPT) && + } while (IS_ENABLED(CONFIG_PREEMPTION) && unlikely(tid != READ_ONCE(c->tid))); /* Same with comment on barrier() in slab_alloc_node() */ diff --git a/net/core/dev.c b/net/core/dev.c index 0ad39c87b7fd..2ca5bf191ae5 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -928,7 +928,7 @@ EXPORT_SYMBOL(dev_get_by_napi_id); * * The use of raw_seqcount_begin() and cond_resched() before * retrying is required as we want to give the writers a chance - * to complete when CONFIG_PREEMPT is not set. + * to complete when CONFIG_PREEMPTION is not set. */ int netdev_get_name(struct net *net, char *name, int ifindex) { |
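The final hunk's comment captures why the rename matters for lockless readers: netdev_get_name() copies dev->name under devnet_rename_seq, and on a kernel without CONFIG_PREEMPTION a spinning reader would never give the renaming writer a chance to run. A hedged reduction of that retry idiom (read_name() and rename_seq are illustrative; the real loop lives in netdev_get_name() in net/core/dev.c):

#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/seqlock.h>
#include <linux/string.h>

static seqcount_t rename_seq = SEQCNT_ZERO(rename_seq);	/* stand-in for devnet_rename_seq */

static void read_name(const struct net_device *dev, char *name)
{
	unsigned int seq;

	for (;;) {
		/*
		 * raw_seqcount_begin() does not spin waiting for an even
		 * count, so the loop is guaranteed to reach cond_resched()
		 * even while a rename is in flight.
		 */
		seq = raw_seqcount_begin(&rename_seq);
		strcpy(name, dev->name);
		if (!read_seqcount_retry(&rename_seq, seq))
			return;
		/*
		 * On !CONFIG_PREEMPTION the writer cannot preempt this
		 * loop; yield explicitly so it can finish and bump the
		 * sequence count.
		 */
		cond_resched();
	}
}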