Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/acpi/boot.c            16
-rw-r--r--  arch/x86/kernel/apb_timer.c            29
-rw-r--r--  arch/x86/kernel/apic/apic.c             2
-rw-r--r--  arch/x86/kernel/apic/x2apic_cluster.c  80
-rw-r--r--  arch/x86/kernel/hpet.c                 69
-rw-r--r--  arch/x86/kernel/setup.c                 9
-rw-r--r--  arch/x86/kernel/setup_percpu.c          3
-rw-r--r--  arch/x86/kernel/tboot.c                25
8 files changed, 100 insertions(+), 133 deletions(-)
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 9414f84584e4..6738e5c82cca 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -161,13 +161,15 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
/**
* acpi_register_lapic - register a local apic and generates a logic cpu number
* @id: local apic id to register
+ * @acpiid: ACPI id to register
* @enabled: this cpu is enabled or not
*
* Returns the logic cpu number which maps to the local apic
*/
-static int acpi_register_lapic(int id, u8 enabled)
+static int acpi_register_lapic(int id, u32 acpiid, u8 enabled)
{
unsigned int ver = 0;
+ int cpu;
if (id >= MAX_LOCAL_APIC) {
printk(KERN_INFO PREFIX "skipped apicid that is too big\n");
@@ -182,7 +184,11 @@ static int acpi_register_lapic(int id, u8 enabled)
if (boot_cpu_physical_apicid != -1U)
ver = apic_version[boot_cpu_physical_apicid];
- return generic_processor_info(id, ver);
+ cpu = generic_processor_info(id, ver);
+ if (cpu >= 0)
+ early_per_cpu(x86_cpu_to_acpiid, cpu) = acpiid;
+
+ return cpu;
}
static int __init
@@ -212,7 +218,7 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
if (!apic->apic_id_valid(apic_id) && enabled)
printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
else
- acpi_register_lapic(apic_id, enabled);
+ acpi_register_lapic(apic_id, processor->uid, enabled);
#else
printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
#endif
@@ -240,6 +246,7 @@ acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
* when we use CPU hotplug.
*/
acpi_register_lapic(processor->id, /* APIC ID */
+ processor->processor_id, /* ACPI ID */
processor->lapic_flags & ACPI_MADT_ENABLED);
return 0;
@@ -258,6 +265,7 @@ acpi_parse_sapic(struct acpi_subtable_header *header, const unsigned long end)
acpi_table_print_madt_entry(header);
acpi_register_lapic((processor->id << 8) | processor->eid,/* APIC ID */
+ processor->processor_id, /* ACPI ID */
processor->lapic_flags & ACPI_MADT_ENABLED);
return 0;
@@ -714,7 +722,7 @@ int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu)
{
int cpu;
- cpu = acpi_register_lapic(physid, ACPI_MADT_ENABLED);
+ cpu = acpi_register_lapic(physid, U32_MAX, ACPI_MADT_ENABLED);
if (cpu < 0) {
pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
return cpu;
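
[Annotation] The boot.c hunks thread the ACPI processor UID through every acpi_register_lapic() call, with U32_MAX as the "unknown" value for CPUs hot-added via acpi_map_cpu(). A minimal sketch of the consumer side; the helper name is hypothetical, and only the x86_cpu_to_acpiid variable comes from this patch (see the apic.c hunk below):

#include <linux/percpu.h>
#include <linux/printk.h>
#include <asm/apic.h>

/* Hypothetical helper: logical CPU number -> ACPI processor UID. */
static u32 cpu_to_acpi_uid(unsigned int cpu)
{
        u32 uid = per_cpu(x86_cpu_to_acpiid, cpu);

        if (uid == U32_MAX)     /* default set in apic.c below */
                pr_debug("CPU %u: no ACPI processor UID recorded\n", cpu);
        return uid;
}
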
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index cefacbad1531..456316f6c868 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -215,26 +215,18 @@ void apbt_setup_secondary_clock(void)
* cpu timers during the offline process due to the ordering of notification.
* the extra interrupt is harmless.
*/
-static int apbt_cpuhp_notify(struct notifier_block *n,
- unsigned long action, void *hcpu)
+static int apbt_cpu_dead(unsigned int cpu)
{
- unsigned long cpu = (unsigned long)hcpu;
struct apbt_dev *adev = &per_cpu(cpu_apbt_dev, cpu);
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_DEAD:
- dw_apb_clockevent_pause(adev->timer);
- if (system_state == SYSTEM_RUNNING) {
- pr_debug("skipping APBT CPU %lu offline\n", cpu);
- } else {
- pr_debug("APBT clockevent for cpu %lu offline\n", cpu);
- dw_apb_clockevent_stop(adev->timer);
- }
- break;
- default:
- pr_debug("APBT notified %lu, no action\n", action);
+ dw_apb_clockevent_pause(adev->timer);
+ if (system_state == SYSTEM_RUNNING) {
+ pr_debug("skipping APBT CPU %u offline\n", cpu);
+ } else {
+ pr_debug("APBT clockevent for cpu %u offline\n", cpu);
+ dw_apb_clockevent_stop(adev->timer);
}
- return NOTIFY_OK;
+ return 0;
}
static __init int apbt_late_init(void)
@@ -242,9 +234,8 @@ static __init int apbt_late_init(void)
if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT ||
!apb_timer_block_enabled)
return 0;
- /* This notifier should be called after workqueue is ready */
- hotcpu_notifier(apbt_cpuhp_notify, -20);
- return 0;
+ return cpuhp_setup_state(CPUHP_X86_APB_DEAD, "X86_APB_DEAD", NULL,
+ apbt_cpu_dead);
}
fs_initcall(apbt_late_init);
#else
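
[Annotation] The apb_timer conversion is the template for the rest of this series: the old notifier that decoded (action, hcpu) by hand becomes one typed callback per hotplug phase, registered once against a dedicated cpuhp_state slot. A stripped-down sketch of the same shape, with hypothetical names; only CPUHP_X86_APB_DEAD and the cpuhp_setup_state() call are taken from the hunk above:

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

/* Runs on a control CPU once @cpu is fully offline (a *_DEAD state). */
static int example_cpu_dead(unsigned int cpu)
{
        /* per-CPU teardown for @cpu goes here */
        return 0;       /* a negative errno is reported by the hotplug core */
}

static int __init example_init(void)
{
        /* NULL startup callback: this state only needs a teardown hook */
        return cpuhp_setup_state(CPUHP_X86_APB_DEAD, "X86_APB_DEAD",
                                 NULL, example_cpu_dead);
}
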
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index f943d2f453a4..ac8d8ad8b009 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -92,8 +92,10 @@ static int apic_extnmi = APIC_EXTNMI_BSP;
*/
DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid, BAD_APICID);
+DEFINE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid, U32_MAX);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
+EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_acpiid);
#ifdef CONFIG_X86_32
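
[Annotation] x86_cpu_to_acpiid uses the x86 early-per-cpu machinery: until setup_per_cpu_areas() runs (see the setup_percpu.c hunks below), accesses are served from a boot-time NR_CPUS array, afterwards from real per-cpu storage. A simplified sketch of that access rule, glossing over the actual macros in arch/x86/include/asm/percpu.h:

/* Approximation of early_per_cpu(x86_cpu_to_acpiid, cpu). */
static u32 read_acpiid(unsigned int cpu)
{
        /* early map still live: per-cpu areas not set up yet */
        if (early_per_cpu_ptr(x86_cpu_to_acpiid))
                return early_per_cpu_ptr(x86_cpu_to_acpiid)[cpu];

        /* after setup_per_cpu_areas(): normal per-cpu access */
        return per_cpu(x86_cpu_to_acpiid, cpu);
}
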
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 24170d0809ba..6368fa69d2af 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -152,68 +152,48 @@ static void init_x2apic_ldr(void)
}
}
-/*
- * At CPU state changes, update the x2apic cluster sibling info.
- */
-static int
-update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
+/*
+ * At CPU state changes, update the x2apic cluster sibling info.
+ */
+int x2apic_prepare_cpu(unsigned int cpu)
{
- unsigned int this_cpu = (unsigned long)hcpu;
- unsigned int cpu;
- int err = 0;
-
- switch (action) {
- case CPU_UP_PREPARE:
- if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, this_cpu),
- GFP_KERNEL)) {
- err = -ENOMEM;
- } else if (!zalloc_cpumask_var(&per_cpu(ipi_mask, this_cpu),
- GFP_KERNEL)) {
- free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
- err = -ENOMEM;
- }
- break;
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- case CPU_DEAD:
- for_each_online_cpu(cpu) {
- if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
- continue;
- cpumask_clear_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu));
- cpumask_clear_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu));
- }
- free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
- free_cpumask_var(per_cpu(ipi_mask, this_cpu));
- break;
+ if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL))
+ return -ENOMEM;
+
+ if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL)) {
+ free_cpumask_var(per_cpu(cpus_in_cluster, cpu));
+ return -ENOMEM;
}
- return notifier_from_errno(err);
+ return 0;
}
-static struct notifier_block x2apic_cpu_notifier = {
- .notifier_call = update_clusterinfo,
-};
-
-static int x2apic_init_cpu_notifier(void)
+int x2apic_dead_cpu(unsigned int this_cpu)
{
- int cpu = smp_processor_id();
-
- zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL);
- zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL);
+ int cpu;
- BUG_ON(!per_cpu(cpus_in_cluster, cpu) || !per_cpu(ipi_mask, cpu));
-
- cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu));
- register_hotcpu_notifier(&x2apic_cpu_notifier);
- return 1;
+ for_each_online_cpu(cpu) {
+ if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
+ continue;
+ cpumask_clear_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu));
+ cpumask_clear_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu));
+ }
+ free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
+ free_cpumask_var(per_cpu(ipi_mask, this_cpu));
+ return 0;
}
static int x2apic_cluster_probe(void)
{
- if (x2apic_mode)
- return x2apic_init_cpu_notifier();
- else
+ int cpu = smp_processor_id();
+
+ if (!x2apic_mode)
return 0;
+
+ cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu));
+ cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "X2APIC_PREPARE",
+ x2apic_prepare_cpu, x2apic_dead_cpu);
+ return 1;
}
static const struct cpumask *x2apic_cluster_target_cpus(void)
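
[Annotation] x2apic_prepare_cpu() and x2apic_dead_cpu() form a prepare/dead pair: PREPARE callbacks run on a control CPU before the incoming CPU starts, so sleeping GFP_KERNEL allocations such as zalloc_cpumask_var() are safe there, and a negative return aborts the bring-up. cpuhp_setup_state() also invokes the prepare callback for CPUs that are already online at registration time. The registration shape, reduced to hypothetical stubs:

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

static int example_prepare_cpu(unsigned int cpu)
{
        /* allocate per-CPU resources for @cpu; may sleep */
        return 0;
}

static int example_dead_cpu(unsigned int cpu)
{
        /* release them once @cpu is offline */
        return 0;
}

static int __init example_register(void)
{
        return cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "X2APIC_PREPARE",
                                 example_prepare_cpu, example_dead_cpu);
}
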
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index f112af7aa62e..3d747070fe67 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -710,31 +710,29 @@ static void hpet_work(struct work_struct *w)
complete(&hpet_work->complete);
}
-static int hpet_cpuhp_notify(struct notifier_block *n,
- unsigned long action, void *hcpu)
+static int hpet_cpuhp_online(unsigned int cpu)
{
- unsigned long cpu = (unsigned long)hcpu;
struct hpet_work_struct work;
+
+ INIT_DELAYED_WORK_ONSTACK(&work.work, hpet_work);
+ init_completion(&work.complete);
+ /* FIXME: add schedule_work_on() */
+ schedule_delayed_work_on(cpu, &work.work, 0);
+ wait_for_completion(&work.complete);
+ destroy_delayed_work_on_stack(&work.work);
+ return 0;
+}
+
+static int hpet_cpuhp_dead(unsigned int cpu)
+{
struct hpet_dev *hdev = per_cpu(cpu_hpet_dev, cpu);
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_ONLINE:
- INIT_DELAYED_WORK_ONSTACK(&work.work, hpet_work);
- init_completion(&work.complete);
- /* FIXME: add schedule_work_on() */
- schedule_delayed_work_on(cpu, &work.work, 0);
- wait_for_completion(&work.complete);
- destroy_delayed_work_on_stack(&work.work);
- break;
- case CPU_DEAD:
- if (hdev) {
- free_irq(hdev->irq, hdev);
- hdev->flags &= ~HPET_DEV_USED;
- per_cpu(cpu_hpet_dev, cpu) = NULL;
- }
- break;
- }
- return NOTIFY_OK;
+ if (!hdev)
+ return 0;
+ free_irq(hdev->irq, hdev);
+ hdev->flags &= ~HPET_DEV_USED;
+ per_cpu(cpu_hpet_dev, cpu) = NULL;
+ return 0;
}
#else
@@ -750,11 +748,8 @@ static void hpet_reserve_msi_timers(struct hpet_data *hd)
}
#endif
-static int hpet_cpuhp_notify(struct notifier_block *n,
- unsigned long action, void *hcpu)
-{
- return NOTIFY_OK;
-}
+#define hpet_cpuhp_online	NULL
+#define hpet_cpuhp_dead		NULL
#endif
@@ -931,7 +926,7 @@ out_nohpet:
*/
static __init int hpet_late_init(void)
{
- int cpu;
+ int ret;
if (boot_hpet_disable)
return -ENODEV;
@@ -961,16 +956,20 @@ static __init int hpet_late_init(void)
if (boot_cpu_has(X86_FEATURE_ARAT))
return 0;
- cpu_notifier_register_begin();
- for_each_online_cpu(cpu) {
- hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);
- }
-
/* This notifier should be called after workqueue is ready */
- __hotcpu_notifier(hpet_cpuhp_notify, -20);
- cpu_notifier_register_done();
-
+ ret = cpuhp_setup_state(CPUHP_AP_X86_HPET_ONLINE, "AP_X86_HPET_ONLINE",
+ hpet_cpuhp_online, NULL);
+ if (ret)
+ return ret;
+ ret = cpuhp_setup_state(CPUHP_X86_HPET_DEAD, "X86_HPET_DEAD", NULL,
+ hpet_cpuhp_dead);
+ if (ret)
+ goto err_cpuhp;
return 0;
+
+err_cpuhp:
+ cpuhp_remove_state(CPUHP_AP_X86_HPET_ONLINE);
+ return ret;
}
fs_initcall(hpet_late_init);
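
[Annotation] hpet_late_init() now installs two independent states: an AP online state whose callback runs for each CPU as it comes up, and a DEAD state for teardown, with cpuhp_remove_state() unwinding the first if the second registration fails. (The NULL #defines in the non-MSI case keep these calls compilable: a NULL callback is simply skipped.) The error-handling skeleton, with hypothetical callback names:

static int __init example_late_init(void)
{
        int ret;

        ret = cpuhp_setup_state(CPUHP_AP_X86_HPET_ONLINE,
                                "AP_X86_HPET_ONLINE", example_online, NULL);
        if (ret)
                return ret;

        ret = cpuhp_setup_state(CPUHP_X86_HPET_DEAD, "X86_HPET_DEAD",
                                NULL, example_dead);
        if (ret)        /* roll back the online state on partial failure */
                cpuhp_remove_state(CPUHP_AP_X86_HPET_ONLINE);

        return ret;
}
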
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index a2616584b6e9..6cb2b02fcc87 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -400,10 +400,6 @@ static void __init reserve_initrd(void)
memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
}
-static void __init early_initrd_acpi_init(void)
-{
- early_acpi_table_init((void *)initrd_start, initrd_end - initrd_start);
-}
#else
static void __init early_reserve_initrd(void)
{
@@ -411,9 +407,6 @@ static void __init early_reserve_initrd(void)
static void __init reserve_initrd(void)
{
}
-static void __init early_initrd_acpi_init(void)
-{
-}
#endif /* CONFIG_BLK_DEV_INITRD */
static void __init parse_setup_data(void)
@@ -1149,7 +1142,7 @@ void __init setup_arch(char **cmdline_p)
reserve_initrd();
- early_initrd_acpi_init();
+ acpi_table_upgrade();
vsmp_init();
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index e4fcb87ba7a6..7a40e068302d 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -236,6 +236,8 @@ void __init setup_per_cpu_areas(void)
early_per_cpu_map(x86_cpu_to_apicid, cpu);
per_cpu(x86_bios_cpu_apicid, cpu) =
early_per_cpu_map(x86_bios_cpu_apicid, cpu);
+ per_cpu(x86_cpu_to_acpiid, cpu) =
+ early_per_cpu_map(x86_cpu_to_acpiid, cpu);
#endif
#ifdef CONFIG_X86_32
per_cpu(x86_cpu_to_logical_apicid, cpu) =
@@ -271,6 +273,7 @@ void __init setup_per_cpu_areas(void)
#ifdef CONFIG_X86_LOCAL_APIC
early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
+ early_per_cpu_ptr(x86_cpu_to_acpiid) = NULL;
#endif
#ifdef CONFIG_X86_32
early_per_cpu_ptr(x86_cpu_to_logical_apicid) = NULL;
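
[Annotation] setup_per_cpu_areas() completes the early-per-cpu handover sketched earlier: each boot-time map is copied into the real per-cpu storage and the early pointer is then cleared, so later early_per_cpu() reads fall through to per_cpu(). Condensed to the one variable this patch adds:

/* condensed from the hunks above; runs once during early boot */
for_each_possible_cpu(cpu)
        per_cpu(x86_cpu_to_acpiid, cpu) =
                early_per_cpu_map(x86_cpu_to_acpiid, cpu);

/* from here on, early_per_cpu() resolves through per_cpu() */
early_per_cpu_ptr(x86_cpu_to_acpiid) = NULL;
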
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 9b0185fbe3eb..654f6c66fe45 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -323,25 +323,16 @@ static int tboot_wait_for_aps(int num_aps)
return !(atomic_read((atomic_t *)&tboot->num_in_wfs) == num_aps);
}
-static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
- void *hcpu)
+static int tboot_dying_cpu(unsigned int cpu)
{
- switch (action) {
- case CPU_DYING:
- atomic_inc(&ap_wfs_count);
- if (num_online_cpus() == 1)
- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
- return NOTIFY_BAD;
- break;
+ atomic_inc(&ap_wfs_count);
+ if (num_online_cpus() == 1) {
+ if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
+ return -EBUSY;
}
- return NOTIFY_OK;
+ return 0;
}
-static struct notifier_block tboot_cpu_notifier =
-{
- .notifier_call = tboot_cpu_callback,
-};
-
#ifdef CONFIG_DEBUG_FS
#define TBOOT_LOG_UUID { 0x26, 0x25, 0x19, 0xc0, 0x30, 0x6b, 0xb4, 0x4d, \
@@ -417,8 +408,8 @@ static __init int tboot_late_init(void)
tboot_create_trampoline();
atomic_set(&ap_wfs_count, 0);
- register_hotcpu_notifier(&tboot_cpu_notifier);
-
+ cpuhp_setup_state(CPUHP_AP_X86_TBOOT_DYING, "AP_X86_TBOOT_DYING", NULL,
+ tboot_dying_cpu);
#ifdef CONFIG_DEBUG_FS
debugfs_create_file("tboot_log", S_IRUSR,
arch_debugfs_dir, NULL, &tboot_log_fops);
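
[Annotation] tboot_dying_cpu() is registered as an AP *_DYING teardown: it runs on the CPU that is going down, in the dying phase of the state machine with interrupts disabled, and returns -EBUSY where the old notifier returned NOTIFY_BAD. A reduced sketch of the registration, using only names from the hunk above (the real tboot_late_init() ignores the return value):

static __init int example_tboot_init(void)
{
        /* teardown-only AP state; no startup counterpart is needed */
        return cpuhp_setup_state(CPUHP_AP_X86_TBOOT_DYING,
                                 "AP_X86_TBOOT_DYING", NULL,
                                 tboot_dying_cpu);
}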