author    | Dmitry Torokhov <dtor@insightbb.com> | 2007-05-01 00:24:54 -0400
committer | Dmitry Torokhov <dtor@insightbb.com> | 2007-05-01 00:24:54 -0400
commit    | bc95f3669f5e6f63cf0b84fe4922c3c6dd4aa775 (patch)
tree      | 427fcf2a7287c16d4b5aa6cbf494d59579a6a8b1 /arch/s390/kernel/smp.c
parent    | 3d29cdff999c37b3876082278a8134a0642a02cd (diff)
parent    | dc87c3985e9b442c60994308a96f887579addc39 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
drivers/usb/input/Makefile
drivers/usb/input/gtco.c
Diffstat (limited to 'arch/s390/kernel/smp.c')
-rw-r--r-- | arch/s390/kernel/smp.c | 542
1 file changed, 278 insertions, 264 deletions
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 83a4ea6e3d60..3754e2031b39 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -1,12 +1,12 @@ /* * arch/s390/kernel/smp.c * - * Copyright (C) IBM Corp. 1999,2006 + * Copyright IBM Corp. 1999,2007 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), - * Martin Schwidefsky (schwidefsky@de.ibm.com) - * Heiko Carstens (heiko.carstens@de.ibm.com) + * Martin Schwidefsky (schwidefsky@de.ibm.com) + * Heiko Carstens (heiko.carstens@de.ibm.com) * - * based on other smp stuff by + * based on other smp stuff by * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net> * (c) 1998 Ingo Molnar * @@ -31,6 +31,8 @@ #include <linux/interrupt.h> #include <linux/cpu.h> #include <linux/timex.h> +#include <linux/bootmem.h> +#include <asm/ipl.h> #include <asm/setup.h> #include <asm/sigp.h> #include <asm/pgalloc.h> @@ -39,38 +41,39 @@ #include <asm/cpcmd.h> #include <asm/tlbflush.h> #include <asm/timer.h> - -extern volatile int __cpu_logical_map[]; +#include <asm/lowcore.h> /* * An array with a pointer the lowcore of every CPU. */ - struct _lowcore *lowcore_ptr[NR_CPUS]; +EXPORT_SYMBOL(lowcore_ptr); cpumask_t cpu_online_map = CPU_MASK_NONE; +EXPORT_SYMBOL(cpu_online_map); + cpumask_t cpu_possible_map = CPU_MASK_NONE; +EXPORT_SYMBOL(cpu_possible_map); static struct task_struct *current_set[NR_CPUS]; static void smp_ext_bitcall(int, ec_bit_sig); -static void smp_ext_bitcall_others(ec_bit_sig); /* - * Structure and data for smp_call_function(). This is designed to minimise - * static memory requirements. It also looks cleaner. + * Structure and data for __smp_call_function_map(). This is designed to + * minimise static memory requirements. It also looks cleaner. */ static DEFINE_SPINLOCK(call_lock); struct call_data_struct { void (*func) (void *info); void *info; - atomic_t started; - atomic_t finished; + cpumask_t started; + cpumask_t finished; int wait; }; -static struct call_data_struct * call_data; +static struct call_data_struct *call_data; /* * 'Call function' interrupt callback @@ -81,127 +84,123 @@ static void do_call_function(void) void *info = call_data->info; int wait = call_data->wait; - atomic_inc(&call_data->started); + cpu_set(smp_processor_id(), call_data->started); (*func)(info); if (wait) - atomic_inc(&call_data->finished); + cpu_set(smp_processor_id(), call_data->finished);; } -/* - * this function sends a 'generic call function' IPI to all other CPUs - * in the system. - */ - -int smp_call_function (void (*func) (void *info), void *info, int nonatomic, - int wait) -/* - * [SUMMARY] Run a function on all other CPUs. - * <func> The function to run. This must be fast and non-blocking. - * <info> An arbitrary pointer to pass to the function. - * <nonatomic> currently unused. - * <wait> If true, wait (atomically) until function has completed on other CPUs. - * [RETURNS] 0 on success, else a negative status code. Does not return until - * remote CPUs are nearly ready to execute <<func>> or are or have executed. - * - * You must not call this function with disabled interrupts or from a - * hardware interrupt handler. - */ +static void __smp_call_function_map(void (*func) (void *info), void *info, + int nonatomic, int wait, cpumask_t map) { struct call_data_struct data; - int cpus = num_online_cpus()-1; - - if (cpus <= 0) - return 0; + int cpu, local = 0; - /* Can deadlock when interrupts are disabled or if in wrong context */ + /* + * Can deadlock when interrupts are disabled or if in wrong context. 
+ */ WARN_ON(irqs_disabled() || in_irq()); + /* + * Check for local function call. We have to have the same call order + * as in on_each_cpu() because of machine_restart_smp(). + */ + if (cpu_isset(smp_processor_id(), map)) { + local = 1; + cpu_clear(smp_processor_id(), map); + } + + cpus_and(map, map, cpu_online_map); + if (cpus_empty(map)) + goto out; + data.func = func; data.info = info; - atomic_set(&data.started, 0); + data.started = CPU_MASK_NONE; data.wait = wait; if (wait) - atomic_set(&data.finished, 0); + data.finished = CPU_MASK_NONE; spin_lock_bh(&call_lock); call_data = &data; - /* Send a message to all other CPUs and wait for them to respond */ - smp_ext_bitcall_others(ec_call_function); + + for_each_cpu_mask(cpu, map) + smp_ext_bitcall(cpu, ec_call_function); /* Wait for response */ - while (atomic_read(&data.started) != cpus) + while (!cpus_equal(map, data.started)) cpu_relax(); if (wait) - while (atomic_read(&data.finished) != cpus) + while (!cpus_equal(map, data.finished)) cpu_relax(); + spin_unlock_bh(&call_lock); - return 0; +out: + local_irq_disable(); + if (local) + func(info); + local_irq_enable(); } /* - * Call a function on one CPU - * cpu : the CPU the function should be executed on + * smp_call_function: + * @func: the function to run; this must be fast and non-blocking + * @info: an arbitrary pointer to pass to the function + * @nonatomic: unused + * @wait: if true, wait (atomically) until function has completed on other CPUs * - * You must not call this function with disabled interrupts or from a - * hardware interrupt handler. You may call it from a bottom half. + * Run a function on all other CPUs. * - * It is guaranteed that the called function runs on the specified CPU, - * preemption is disabled. + * You must not call this function with disabled interrupts, from a + * hardware interrupt handler or from a bottom half. */ -int smp_call_function_on(void (*func) (void *info), void *info, - int nonatomic, int wait, int cpu) +int smp_call_function(void (*func) (void *info), void *info, int nonatomic, + int wait) { - struct call_data_struct data; - int curr_cpu; - - if (!cpu_online(cpu)) - return -EINVAL; - - /* Can deadlock when interrupts are disabled or if in wrong context */ - WARN_ON(irqs_disabled() || in_irq()); - - /* disable preemption for local function call */ - curr_cpu = get_cpu(); - - if (curr_cpu == cpu) { - /* direct call to function */ - func(info); - put_cpu(); - return 0; - } - - data.func = func; - data.info = info; - atomic_set(&data.started, 0); - data.wait = wait; - if (wait) - atomic_set(&data.finished, 0); - - spin_lock_bh(&call_lock); - call_data = &data; - smp_ext_bitcall(cpu, ec_call_function); + cpumask_t map; - /* Wait for response */ - while (atomic_read(&data.started) != 1) - cpu_relax(); + preempt_disable(); + map = cpu_online_map; + cpu_clear(smp_processor_id(), map); + __smp_call_function_map(func, info, nonatomic, wait, map); + preempt_enable(); + return 0; +} +EXPORT_SYMBOL(smp_call_function); - if (wait) - while (atomic_read(&data.finished) != 1) - cpu_relax(); +/* + * smp_call_function_on: + * @func: the function to run; this must be fast and non-blocking + * @info: an arbitrary pointer to pass to the function + * @nonatomic: unused + * @wait: if true, wait (atomically) until function has completed on other CPUs + * @cpu: the CPU where func should run + * + * Run a function on one processor. + * + * You must not call this function with disabled interrupts, from a + * hardware interrupt handler or from a bottom half. 
+ */ +int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic, + int wait, int cpu) +{ + cpumask_t map = CPU_MASK_NONE; - spin_unlock_bh(&call_lock); - put_cpu(); + preempt_disable(); + cpu_set(cpu, map); + __smp_call_function_map(func, info, nonatomic, wait, map); + preempt_enable(); return 0; } EXPORT_SYMBOL(smp_call_function_on); static void do_send_stop(void) { - int cpu, rc; + int cpu, rc; - /* stop all processors */ + /* stop all processors */ for_each_online_cpu(cpu) { if (cpu == smp_processor_id()) continue; @@ -213,9 +212,9 @@ static void do_send_stop(void) static void do_store_status(void) { - int cpu, rc; + int cpu, rc; - /* store status of all processors in their lowcores (real 0) */ + /* store status of all processors in their lowcores (real 0) */ for_each_online_cpu(cpu) { if (cpu == smp_processor_id()) continue; @@ -223,8 +222,8 @@ static void do_store_status(void) rc = signal_processor_p( (__u32)(unsigned long) lowcore_ptr[cpu], cpu, sigp_store_status_at_address); - } while(rc == sigp_busy); - } + } while (rc == sigp_busy); + } } static void do_wait_for_stop(void) @@ -235,7 +234,7 @@ static void do_wait_for_stop(void) for_each_online_cpu(cpu) { if (cpu == smp_processor_id()) continue; - while(!smp_cpu_not_running(cpu)) + while (!smp_cpu_not_running(cpu)) cpu_relax(); } } @@ -249,7 +248,7 @@ void smp_send_stop(void) /* Disable all interrupts/machine checks */ __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK); - /* write magic number to zero page (absolute 0) */ + /* write magic number to zero page (absolute 0) */ lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC; /* stop other processors. */ @@ -265,8 +264,7 @@ void smp_send_stop(void) /* * Reboot, halt and power_off routines for SMP. */ - -void machine_restart_smp(char * __unused) +void machine_restart_smp(char *__unused) { smp_send_stop(); do_reipl(); @@ -297,17 +295,17 @@ void machine_power_off_smp(void) static void do_ext_call_interrupt(__u16 code) { - unsigned long bits; + unsigned long bits; - /* - * handle bit signal external calls - * - * For the ec_schedule signal we have to do nothing. All the work - * is done automatically when we return from the interrupt. - */ + /* + * handle bit signal external calls + * + * For the ec_schedule signal we have to do nothing. All the work + * is done automatically when we return from the interrupt. + */ bits = xchg(&S390_lowcore.ext_call_fast, 0); - if (test_bit(ec_call_function, &bits)) + if (test_bit(ec_call_function, &bits)) do_call_function(); } @@ -317,34 +315,14 @@ static void do_ext_call_interrupt(__u16 code) */ static void smp_ext_bitcall(int cpu, ec_bit_sig sig) { - /* - * Set signaling bit in lowcore of target cpu and kick it - */ + /* + * Set signaling bit in lowcore of target cpu and kick it + */ set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); - while(signal_processor(cpu, sigp_emergency_signal) == sigp_busy) + while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy) udelay(10); } -/* - * Send an external call sigp to every other cpu in the system and - * return without waiting for its completion. 
- */ -static void smp_ext_bitcall_others(ec_bit_sig sig) -{ - int cpu; - - for_each_online_cpu(cpu) { - if (cpu == smp_processor_id()) - continue; - /* - * Set signaling bit in lowcore of target cpu and kick it - */ - set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); - while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy) - udelay(10); - } -} - #ifndef CONFIG_64BIT /* * this function sends a 'purge tlb' signal to another CPU. @@ -356,7 +334,7 @@ void smp_ptlb_callback(void *info) void smp_ptlb_all(void) { - on_each_cpu(smp_ptlb_callback, NULL, 0, 1); + on_each_cpu(smp_ptlb_callback, NULL, 0, 1); } EXPORT_SYMBOL(smp_ptlb_all); #endif /* ! CONFIG_64BIT */ @@ -368,7 +346,7 @@ EXPORT_SYMBOL(smp_ptlb_all); */ void smp_send_reschedule(int cpu) { - smp_ext_bitcall(cpu, ec_schedule); + smp_ext_bitcall(cpu, ec_schedule); } /* @@ -382,11 +360,12 @@ struct ec_creg_mask_parms { /* * callback for setting/clearing control bits */ -static void smp_ctl_bit_callback(void *info) { +static void smp_ctl_bit_callback(void *info) +{ struct ec_creg_mask_parms *pp = info; unsigned long cregs[16]; int i; - + __ctl_store(cregs, 0, 15); for (i = 0; i <= 15; i++) cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i]; @@ -405,6 +384,7 @@ void smp_ctl_set_bit(int cr, int bit) parms.orvals[cr] = 1 << bit; on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1); } +EXPORT_SYMBOL(smp_ctl_set_bit); /* * Clear a bit in a control register of all cpus @@ -418,13 +398,72 @@ void smp_ctl_clear_bit(int cr, int bit) parms.andvals[cr] = ~(1L << bit); on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1); } +EXPORT_SYMBOL(smp_ctl_clear_bit); + +#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) + +/* + * zfcpdump_prefix_array holds prefix registers for the following scenario: + * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to + * save its prefix registers, since they get lost, when switching from 31 bit + * to 64 bit. + */ +unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \ + __attribute__((__section__(".data"))); + +static void __init smp_get_save_areas(void) +{ + unsigned int cpu, cpu_num, rc; + __u16 boot_cpu_addr; + + if (ipl_info.type != IPL_TYPE_FCP_DUMP) + return; + boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr; + cpu_num = 1; + for (cpu = 0; cpu <= 65535; cpu++) { + if ((u16) cpu == boot_cpu_addr) + continue; + __cpu_logical_map[1] = (__u16) cpu; + if (signal_processor(1, sigp_sense) == sigp_not_operational) + continue; + if (cpu_num >= NR_CPUS) { + printk("WARNING: Registers for cpu %i are not " + "saved, since dump kernel was compiled with" + "NR_CPUS=%i!\n", cpu_num, NR_CPUS); + continue; + } + zfcpdump_save_areas[cpu_num] = + alloc_bootmem(sizeof(union save_area)); + while (1) { + rc = signal_processor(1, sigp_stop_and_store_status); + if (rc != sigp_busy) + break; + cpu_relax(); + } + memcpy(zfcpdump_save_areas[cpu_num], + (void *)(unsigned long) store_prefix() + + SAVE_AREA_BASE, SAVE_AREA_SIZE); +#ifdef __s390x__ + /* copy original prefix register */ + zfcpdump_save_areas[cpu_num]->s390x.pref_reg = + zfcpdump_prefix_array[cpu_num]; +#endif + cpu_num++; + } +} + +union save_area *zfcpdump_save_areas[NR_CPUS + 1]; +EXPORT_SYMBOL_GPL(zfcpdump_save_areas); + +#else +#define smp_get_save_areas() do { } while (0) +#endif /* * Lets check how many CPUs we have. 
*/ -static unsigned int -__init smp_count_cpus(void) +static unsigned int __init smp_count_cpus(void) { unsigned int cpu, num_cpus; __u16 boot_cpu_addr; @@ -440,31 +479,30 @@ __init smp_count_cpus(void) if ((__u16) cpu == boot_cpu_addr) continue; __cpu_logical_map[1] = (__u16) cpu; - if (signal_processor(1, sigp_sense) == - sigp_not_operational) + if (signal_processor(1, sigp_sense) == sigp_not_operational) continue; num_cpus++; } - printk("Detected %d CPU's\n",(int) num_cpus); + printk("Detected %d CPU's\n", (int) num_cpus); printk("Boot cpu address %2X\n", boot_cpu_addr); return num_cpus; } /* - * Activate a secondary processor. + * Activate a secondary processor. */ int __devinit start_secondary(void *cpuvoid) { - /* Setup the cpu */ - cpu_init(); + /* Setup the cpu */ + cpu_init(); preempt_disable(); /* Enable TOD clock interrupts on the secondary cpu. */ - init_cpu_timer(); + init_cpu_timer(); #ifdef CONFIG_VIRT_TIMER /* Enable cpu timer interrupts on the secondary cpu. */ - init_cpu_vtimer(); + init_cpu_vtimer(); #endif /* Enable pfault pseudo page faults on this cpu. */ pfault_init(); @@ -473,11 +511,11 @@ int __devinit start_secondary(void *cpuvoid) cpu_set(smp_processor_id(), cpu_online_map); /* Switch on interrupts */ local_irq_enable(); - /* Print info about this processor */ - print_cpu_info(&S390_lowcore.cpu_data); - /* cpu_idle will call schedule for us */ - cpu_idle(); - return 0; + /* Print info about this processor */ + print_cpu_info(&S390_lowcore.cpu_data); + /* cpu_idle will call schedule for us */ + cpu_idle(); + return 0; } static void __init smp_create_idle(unsigned int cpu) @@ -494,56 +532,13 @@ static void __init smp_create_idle(unsigned int cpu) current_set[cpu] = p; } -/* Reserving and releasing of CPUs */ - -static DEFINE_SPINLOCK(smp_reserve_lock); -static int smp_cpu_reserved[NR_CPUS]; - -int -smp_get_cpu(cpumask_t cpu_mask) -{ - unsigned long flags; - int cpu; - - spin_lock_irqsave(&smp_reserve_lock, flags); - /* Try to find an already reserved cpu. */ - for_each_cpu_mask(cpu, cpu_mask) { - if (smp_cpu_reserved[cpu] != 0) { - smp_cpu_reserved[cpu]++; - /* Found one. */ - goto out; - } - } - /* Reserve a new cpu from cpu_mask. 
*/ - for_each_cpu_mask(cpu, cpu_mask) { - if (cpu_online(cpu)) { - smp_cpu_reserved[cpu]++; - goto out; - } - } - cpu = -ENODEV; -out: - spin_unlock_irqrestore(&smp_reserve_lock, flags); - return cpu; -} - -void -smp_put_cpu(int cpu) -{ - unsigned long flags; - - spin_lock_irqsave(&smp_reserve_lock, flags); - smp_cpu_reserved[cpu]--; - spin_unlock_irqrestore(&smp_reserve_lock, flags); -} - -static int -cpu_stopped(int cpu) +static int cpu_stopped(int cpu) { __u32 status; /* Check for stopped state */ - if (signal_processor_ps(&status, 0, cpu, sigp_sense) == sigp_status_stored) { + if (signal_processor_ps(&status, 0, cpu, sigp_sense) == + sigp_status_stored) { if (status & 0x40) return 1; } @@ -552,14 +547,13 @@ cpu_stopped(int cpu) /* Upping and downing of CPUs */ -int -__cpu_up(unsigned int cpu) +int __cpu_up(unsigned int cpu) { struct task_struct *idle; - struct _lowcore *cpu_lowcore; + struct _lowcore *cpu_lowcore; struct stack_frame *sf; - sigp_ccode ccode; - int curr_cpu; + sigp_ccode ccode; + int curr_cpu; for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) { __cpu_logical_map[cpu] = (__u16) curr_cpu; @@ -572,7 +566,7 @@ __cpu_up(unsigned int cpu) ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]), cpu, sigp_set_prefix); - if (ccode){ + if (ccode) { printk("sigp_set_prefix failed for cpu %d " "with condition code %d\n", (int) cpu, (int) ccode); @@ -580,9 +574,9 @@ __cpu_up(unsigned int cpu) } idle = current_set[cpu]; - cpu_lowcore = lowcore_ptr[cpu]; + cpu_lowcore = lowcore_ptr[cpu]; cpu_lowcore->kernel_stack = (unsigned long) - task_stack_page(idle) + (THREAD_SIZE); + task_stack_page(idle) + THREAD_SIZE; sf = (struct stack_frame *) (cpu_lowcore->kernel_stack - sizeof(struct pt_regs) - sizeof(struct stack_frame)); @@ -594,11 +588,11 @@ __cpu_up(unsigned int cpu) " stam 0,15,0(%0)" : : "a" (&cpu_lowcore->access_regs_save_area) : "memory"); cpu_lowcore->percpu_offset = __per_cpu_offset[cpu]; - cpu_lowcore->current_task = (unsigned long) idle; - cpu_lowcore->cpu_data.cpu_nr = cpu; + cpu_lowcore->current_task = (unsigned long) idle; + cpu_lowcore->cpu_data.cpu_nr = cpu; eieio(); - while (signal_processor(cpu,sigp_restart) == sigp_busy) + while (signal_processor(cpu, sigp_restart) == sigp_busy) udelay(10); while (!cpu_online(cpu)) @@ -613,6 +607,7 @@ void __init smp_setup_cpu_possible_map(void) { unsigned int phy_cpus, pos_cpus, cpu; + smp_get_save_areas(); phy_cpus = smp_count_cpus(); pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS); @@ -644,18 +639,11 @@ static int __init setup_possible_cpus(char *s) } early_param("possible_cpus", setup_possible_cpus); -int -__cpu_disable(void) +int __cpu_disable(void) { - unsigned long flags; struct ec_creg_mask_parms cr_parms; int cpu = smp_processor_id(); - spin_lock_irqsave(&smp_reserve_lock, flags); - if (smp_cpu_reserved[cpu] != 0) { - spin_unlock_irqrestore(&smp_reserve_lock, flags); - return -EBUSY; - } cpu_clear(cpu, cpu_online_map); /* Disable pfault pseudo page faults on this cpu. 
*/ @@ -666,24 +654,23 @@ __cpu_disable(void) /* disable all external interrupts */ cr_parms.orvals[0] = 0; - cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 | - 1<<11 | 1<<10 | 1<< 6 | 1<< 4); + cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 | + 1 << 11 | 1 << 10 | 1 << 6 | 1 << 4); /* disable all I/O interrupts */ cr_parms.orvals[6] = 0; - cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 | - 1<<27 | 1<<26 | 1<<25 | 1<<24); + cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 | + 1 << 27 | 1 << 26 | 1 << 25 | 1 << 24); /* disable most machine checks */ cr_parms.orvals[14] = 0; - cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24); + cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 | + 1 << 25 | 1 << 24); smp_ctl_bit_callback(&cr_parms); - spin_unlock_irqrestore(&smp_reserve_lock, flags); return 0; } -void -__cpu_die(unsigned int cpu) +void __cpu_die(unsigned int cpu) { /* Wait until target cpu is down */ while (!smp_cpu_not_running(cpu)) @@ -691,13 +678,12 @@ __cpu_die(unsigned int cpu) printk("Processor %d spun down\n", cpu); } -void -cpu_die(void) +void cpu_die(void) { idle_task_exit(); signal_processor(smp_processor_id(), sigp_stop); BUG(); - for(;;); + for (;;); } #endif /* CONFIG_HOTPLUG_CPU */ @@ -710,36 +696,36 @@ void __init smp_prepare_cpus(unsigned int max_cpus) { unsigned long stack; unsigned int cpu; - int i; - - /* request the 0x1201 emergency signal external interrupt */ - if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) - panic("Couldn't request external interrupt 0x1201"); - memset(lowcore_ptr,0,sizeof(lowcore_ptr)); - /* - * Initialize prefix pages and stacks for all possible cpus - */ + int i; + + /* request the 0x1201 emergency signal external interrupt */ + if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) + panic("Couldn't request external interrupt 0x1201"); + memset(lowcore_ptr, 0, sizeof(lowcore_ptr)); + /* + * Initialize prefix pages and stacks for all possible cpus + */ print_cpu_info(&S390_lowcore.cpu_data); - for_each_possible_cpu(i) { + for_each_possible_cpu(i) { lowcore_ptr[i] = (struct _lowcore *) - __get_free_pages(GFP_KERNEL|GFP_DMA, - sizeof(void*) == 8 ? 1 : 0); - stack = __get_free_pages(GFP_KERNEL,ASYNC_ORDER); - if (lowcore_ptr[i] == NULL || stack == 0ULL) + __get_free_pages(GFP_KERNEL | GFP_DMA, + sizeof(void*) == 8 ? 
1 : 0); + stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); + if (!lowcore_ptr[i] || !stack) panic("smp_boot_cpus failed to allocate memory\n"); *(lowcore_ptr[i]) = S390_lowcore; - lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE); - stack = __get_free_pages(GFP_KERNEL,0); - if (stack == 0ULL) + lowcore_ptr[i]->async_stack = stack + ASYNC_SIZE; + stack = __get_free_pages(GFP_KERNEL, 0); + if (!stack) panic("smp_boot_cpus failed to allocate memory\n"); - lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE); + lowcore_ptr[i]->panic_stack = stack + PAGE_SIZE; #ifndef CONFIG_64BIT if (MACHINE_HAS_IEEE) { lowcore_ptr[i]->extended_save_area_addr = - (__u32) __get_free_pages(GFP_KERNEL,0); - if (lowcore_ptr[i]->extended_save_area_addr == 0) + (__u32) __get_free_pages(GFP_KERNEL, 0); + if (!lowcore_ptr[i]->extended_save_area_addr) panic("smp_boot_cpus failed to " "allocate memory\n"); } @@ -778,35 +764,63 @@ void smp_cpus_done(unsigned int max_cpus) */ int setup_profiling_timer(unsigned int multiplier) { - return 0; + return 0; } static DEFINE_PER_CPU(struct cpu, cpu_devices); +static ssize_t show_capability(struct sys_device *dev, char *buf) +{ + unsigned int capability; + int rc; + + rc = get_cpu_capability(&capability); + if (rc) + return rc; + return sprintf(buf, "%u\n", capability); +} +static SYSDEV_ATTR(capability, 0444, show_capability, NULL); + +static int __cpuinit smp_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + unsigned int cpu = (unsigned int)(long)hcpu; + struct cpu *c = &per_cpu(cpu_devices, cpu); + struct sys_device *s = &c->sysdev; + + switch (action) { + case CPU_ONLINE: + if (sysdev_create_file(s, &attr_capability)) + return NOTIFY_BAD; + break; + case CPU_DEAD: + sysdev_remove_file(s, &attr_capability); + break; + } + return NOTIFY_OK; +} + +static struct notifier_block __cpuinitdata smp_cpu_nb = { + .notifier_call = smp_cpu_notify, +}; + static int __init topology_init(void) { int cpu; - int ret; + + register_cpu_notifier(&smp_cpu_nb); for_each_possible_cpu(cpu) { struct cpu *c = &per_cpu(cpu_devices, cpu); + struct sys_device *s = &c->sysdev; c->hotpluggable = 1; - ret = register_cpu(c, cpu); - if (ret) - printk(KERN_WARNING "topology_init: register_cpu %d " - "failed (%d)\n", cpu, ret); + register_cpu(c, cpu); + if (!cpu_online(cpu)) + continue; + s = &c->sysdev; + sysdev_create_file(s, &attr_capability); } return 0; } - subsys_initcall(topology_init); - -EXPORT_SYMBOL(cpu_online_map); -EXPORT_SYMBOL(cpu_possible_map); -EXPORT_SYMBOL(lowcore_ptr); -EXPORT_SYMBOL(smp_ctl_set_bit); -EXPORT_SYMBOL(smp_ctl_clear_bit); -EXPORT_SYMBOL(smp_call_function); -EXPORT_SYMBOL(smp_get_cpu); -EXPORT_SYMBOL(smp_put_cpu); |
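For context, this patch replaces the atomic started/finished counters with cpumasks and routes both smp_call_function() and smp_call_function_on() through the new __smp_call_function_map() helper. The snippet below is a minimal, hypothetical caller of the smp_call_function_on() interface shown in the diff above; it is a sketch for a 2.6.21-era s390 tree, and the module name, function name and printk text are illustrative, not part of the patch.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/smp.h>

/* Runs on the target cpu via the ec_call_function external call. */
static void report_cpu(void *info)
{
	printk(KERN_INFO "report_cpu: running on cpu %d\n",
	       smp_processor_id());
}

static int __init call_on_demo_init(void)
{
	/*
	 * Run report_cpu() on cpu 1 and wait for it to complete.
	 * Per the comment in the patch, this must not be called with
	 * interrupts disabled, from a hardware interrupt handler or
	 * from a bottom half.
	 */
	return smp_call_function_on(report_cpu, NULL, 0, 1, 1);
}

static void __exit call_on_demo_exit(void)
{
}

module_init(call_on_demo_init);
module_exit(call_on_demo_exit);
MODULE_LICENSE("GPL");

Note that with the reworked implementation the requested cpu is simply masked against cpu_online_map, so calling this for an offline cpu quietly does nothing and still returns 0, whereas the old smp_call_function_on() returned -EINVAL.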