Diffstat (limited to 'arch/s390')
-rw-r--r--   arch/s390/include/asm/nmi.h |  6
-rw-r--r--   arch/s390/kernel/nmi.c      | 41
-rw-r--r--   arch/s390/kernel/setup.c    |  2
-rw-r--r--   arch/s390/kernel/smp.c      | 13
4 files changed, 29 insertions, 33 deletions
diff --git a/arch/s390/include/asm/nmi.h b/arch/s390/include/asm/nmi.h
index 2db45d7e68aa..55c9051dddfd 100644
--- a/arch/s390/include/asm/nmi.h
+++ b/arch/s390/include/asm/nmi.h
@@ -98,9 +98,9 @@ struct mcesa {
 
 struct pt_regs;
 
-void nmi_alloc_boot_cpu(struct lowcore *lc);
-int nmi_alloc_per_cpu(struct lowcore *lc);
-void nmi_free_per_cpu(struct lowcore *lc);
+void nmi_alloc_mcesa_early(u64 *mcesad);
+int nmi_alloc_mcesa(u64 *mcesad);
+void nmi_free_mcesa(u64 *mcesad);
 void s390_handle_mcck(void);
 void __s390_handle_mcck(void);
 
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 3f18c1412eba..1cf1e37553e8 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -58,27 +58,27 @@ static inline unsigned long nmi_get_mcesa_size(void)
 
 /*
  * The initial machine check extended save area for the boot CPU.
- * It will be replaced by nmi_init() with an allocated structure.
- * The structure is required for machine check happening early in
- * the boot process.
+ * It will be replaced on the boot CPU reinit with an allocated
+ * structure. The structure is required for machine check happening
+ * early in the boot process.
  */
 static struct mcesa boot_mcesa __initdata __aligned(MCESA_MAX_SIZE);
 
-void __init nmi_alloc_boot_cpu(struct lowcore *lc)
+void __init nmi_alloc_mcesa_early(u64 *mcesad)
 {
         if (!nmi_needs_mcesa())
                 return;
-        lc->mcesad = __pa(&boot_mcesa);
+        *mcesad = __pa(&boot_mcesa);
         if (MACHINE_HAS_GS)
-                lc->mcesad |= ilog2(MCESA_MAX_SIZE);
+                *mcesad |= ilog2(MCESA_MAX_SIZE);
 }
 
-static int __init nmi_init(void)
+static void __init nmi_alloc_cache(void)
 {
-        unsigned long origin, cr0, size;
+        unsigned long size;
 
         if (!nmi_needs_mcesa())
-                return 0;
+                return;
         size = nmi_get_mcesa_size();
         if (size > MCESA_MIN_SIZE)
                 mcesa_origin_lc = ilog2(size);
@@ -86,40 +86,31 @@ static int __init nmi_init(void)
         mcesa_cache = kmem_cache_create("nmi_save_areas", size, size, 0, NULL);
         if (!mcesa_cache)
                 panic("Couldn't create nmi save area cache");
-        origin = (unsigned long) kmem_cache_alloc(mcesa_cache, GFP_KERNEL);
-        if (!origin)
-                panic("Couldn't allocate nmi save area");
-        /* The pointer is stored with mcesa_bits ORed in */
-        kmemleak_not_leak((void *) origin);
-        __ctl_store(cr0, 0, 0);
-        __ctl_clear_bit(0, 28); /* disable lowcore protection */
-        /* Replace boot_mcesa on the boot CPU */
-        S390_lowcore.mcesad = __pa(origin) | mcesa_origin_lc;
-        __ctl_load(cr0, 0, 0);
-        return 0;
 }
-early_initcall(nmi_init);
 
-int nmi_alloc_per_cpu(struct lowcore *lc)
+int __ref nmi_alloc_mcesa(u64 *mcesad)
 {
         unsigned long origin;
 
+        *mcesad = 0;
         if (!nmi_needs_mcesa())
                 return 0;
+        if (!mcesa_cache)
+                nmi_alloc_cache();
         origin = (unsigned long) kmem_cache_alloc(mcesa_cache, GFP_KERNEL);
         if (!origin)
                 return -ENOMEM;
         /* The pointer is stored with mcesa_bits ORed in */
         kmemleak_not_leak((void *) origin);
-        lc->mcesad = __pa(origin) | mcesa_origin_lc;
+        *mcesad = __pa(origin) | mcesa_origin_lc;
         return 0;
 }
 
-void nmi_free_per_cpu(struct lowcore *lc)
+void nmi_free_mcesa(u64 *mcesad)
 {
         if (!nmi_needs_mcesa())
                 return;
-        kmem_cache_free(mcesa_cache, __va(lc->mcesad & MCESA_ORIGIN_MASK));
+        kmem_cache_free(mcesa_cache, __va(*mcesad & MCESA_ORIGIN_MASK));
 }
 
 static notrace void s390_handle_damage(void)
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 225ab2d0a4c6..f2c25d113e7b 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -445,7 +445,7 @@ static void __init setup_lowcore_dat_off(void)
         lc->lpp = LPP_MAGIC;
         lc->machine_flags = S390_lowcore.machine_flags;
         lc->preempt_count = S390_lowcore.preempt_count;
-        nmi_alloc_boot_cpu(lc);
+        nmi_alloc_mcesa_early(&lc->mcesad);
         lc->sys_enter_timer = S390_lowcore.sys_enter_timer;
         lc->exit_timer = S390_lowcore.exit_timer;
         lc->user_timer = S390_lowcore.user_timer;
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 2590c31a8fc3..1172aaf0b7e2 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -212,7 +212,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
         lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
         lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
         lc->preempt_count = PREEMPT_DISABLED;
-        if (nmi_alloc_per_cpu(lc))
+        if (nmi_alloc_mcesa(&lc->mcesad))
                 goto out;
         lowcore_ptr[cpu] = lc;
         pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
@@ -239,7 +239,7 @@ static void pcpu_free_lowcore(struct pcpu *pcpu)
         mcck_stack = lc->mcck_stack - STACK_INIT_OFFSET;
         pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
         lowcore_ptr[cpu] = NULL;
-        nmi_free_per_cpu(lc);
+        nmi_free_mcesa(&lc->mcesad);
         stack_free(async_stack);
         stack_free(mcck_stack);
         free_pages(nodat_stack, THREAD_SIZE_ORDER);
@@ -1271,14 +1271,15 @@ static int __init smp_reinit_ipl_cpu(void)
 {
         unsigned long async_stack, nodat_stack, mcck_stack;
         struct lowcore *lc, *lc_ipl;
-        unsigned long flags;
+        unsigned long flags, cr0;
+        u64 mcesad;
 
         lc_ipl = lowcore_ptr[0];
         lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
         nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
         async_stack = stack_alloc();
         mcck_stack = stack_alloc();
-        if (!lc || !nodat_stack || !async_stack || !mcck_stack)
+        if (!lc || !nodat_stack || !async_stack || !mcck_stack || nmi_alloc_mcesa(&mcesad))
                 panic("Couldn't allocate memory");
 
         local_irq_save(flags);
@@ -1287,6 +1288,10 @@ static int __init smp_reinit_ipl_cpu(void)
         S390_lowcore.nodat_stack = nodat_stack + STACK_INIT_OFFSET;
         S390_lowcore.async_stack = async_stack + STACK_INIT_OFFSET;
         S390_lowcore.mcck_stack = mcck_stack + STACK_INIT_OFFSET;
+        __ctl_store(cr0, 0, 0);
+        __ctl_clear_bit(0, 28); /* disable lowcore protection */
+        S390_lowcore.mcesad = mcesad;
+        __ctl_load(cr0, 0, 0);
         lowcore_ptr[0] = lc;
         local_mcck_enable();
         local_irq_restore(flags);
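
For orientation, the sketch below is a minimal, self-contained userspace model of the mcesad value the renamed helpers hand back: the save-area origin, aligned to its size, is ORed with an ilog2(size) length code in a single u64, and the origin is recovered by masking before the area is freed. The MCESA_* constants and the ilog2 tagging are assumptions mirroring arch/s390/include/asm/nmi.h; the real nmi_alloc_mcesa()/nmi_free_mcesa() additionally use a kmem cache and store the physical (__pa) address, which this model skips.

/*
 * Standalone model of the mcesad encoding managed by the helpers above.
 * The MCESA_* values are assumed to mirror arch/s390/include/asm/nmi.h.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MCESA_ORIGIN_MASK       (~0x3ffULL)     /* upper bits: save area origin */
#define MCESA_LC_MASK           0x3ffULL        /* lower bits: length code */
#define MCESA_MAX_SIZE          2048ULL         /* size with guarded storage (MACHINE_HAS_GS) */

static unsigned int ilog2_ull(unsigned long long v)
{
        unsigned int r = 0;

        while (v >>= 1)
                r++;
        return r;
}

int main(void)
{
        uint64_t mcesad;
        void *origin;

        /* like kmem_cache_alloc() from a cache aligned to the area size */
        origin = aligned_alloc(MCESA_MAX_SIZE, MCESA_MAX_SIZE);
        if (!origin)
                return 1;
        /* nmi_alloc_mcesa(): store the origin with the length code ORed in */
        mcesad = (uint64_t)(uintptr_t)origin | ilog2_ull(MCESA_MAX_SIZE);
        printf("origin=%p length-code=%llu\n",
               (void *)(uintptr_t)(mcesad & MCESA_ORIGIN_MASK),
               (unsigned long long)(mcesad & MCESA_LC_MASK));
        /* nmi_free_mcesa(): mask the length code off before freeing */
        free((void *)(uintptr_t)(mcesad & MCESA_ORIGIN_MASK));
        return 0;
}

Packing the length code into the unused low bits of the size-aligned origin lets one lowcore u64 describe both where the extended save area is and how large it is, which is why the new interface only needs a pointer to the mcesad field rather than the whole lowcore.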