| field | value | date |
|---|---|---|
| author | Bartlomiej Zolnierkiewicz <[email protected]> | 2018-09-26 15:54:31 +0200 |
| committer | Bartlomiej Zolnierkiewicz <[email protected]> | 2018-09-26 15:54:31 +0200 |
| commit | aaccf3c97418f169afdbb5855e9cbcbda34e90fd (patch) | |
| tree | 5d4207e67958bdbc23288cf30178692f5534e1a0 /arch/powerpc/kernel/smp.c | |
| parent | f39684524b391c5a7ed0ac44db4fec3357af1c5d (diff) | |
| parent | 6bf4ca7fbc85d80446ac01c0d1d77db4d91a6d84 (diff) | |
Merge tag 'v4.19-rc5' of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux into fbdev-for-next
Sync with upstream (which now contains fbdev-v4.19 changes) to
prepare a base for fbdev-v4.20 changes.
Diffstat (limited to 'arch/powerpc/kernel/smp.c')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | arch/powerpc/kernel/smp.c | 55 |

1 file changed, 32 insertions(+), 23 deletions(-)
```diff
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 4794d6b4f4d2..61c1fadbc644 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -423,7 +423,8 @@ int smp_handle_nmi_ipi(struct pt_regs *regs)
 	fn(regs);
 
 	nmi_ipi_lock();
-	nmi_ipi_busy_count--;
+	if (nmi_ipi_busy_count > 1) /* Can race with caller time-out */
+		nmi_ipi_busy_count--;
 out:
 	nmi_ipi_unlock_end(&flags);
 
@@ -448,29 +449,11 @@ static void do_smp_send_nmi_ipi(int cpu, bool safe)
 	}
 }
 
-void smp_flush_nmi_ipi(u64 delay_us)
-{
-	unsigned long flags;
-
-	nmi_ipi_lock_start(&flags);
-	while (nmi_ipi_busy_count) {
-		nmi_ipi_unlock_end(&flags);
-		udelay(1);
-		if (delay_us) {
-			delay_us--;
-			if (!delay_us)
-				return;
-		}
-		nmi_ipi_lock_start(&flags);
-	}
-	nmi_ipi_unlock_end(&flags);
-}
-
 /*
  * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
  * - fn is the target callback function.
  * - delay_us > 0 is the delay before giving up waiting for targets to
- *   enter the handler, == 0 specifies indefinite delay.
+ *   complete executing the handler, == 0 specifies indefinite delay.
  */
 int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool safe)
 {
@@ -507,8 +490,23 @@ int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool
 
 	do_smp_send_nmi_ipi(cpu, safe);
 
+	nmi_ipi_lock();
+	/* nmi_ipi_busy_count is held here, so unlock/lock is okay */
 	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
+		nmi_ipi_unlock();
 		udelay(1);
+		nmi_ipi_lock();
+		if (delay_us) {
+			delay_us--;
+			if (!delay_us)
+				break;
+		}
+	}
+
+	while (nmi_ipi_busy_count > 1) {
+		nmi_ipi_unlock();
+		udelay(1);
+		nmi_ipi_lock();
 		if (delay_us) {
 			delay_us--;
 			if (!delay_us)
@@ -516,12 +514,17 @@ int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool
 		}
 	}
 
-	nmi_ipi_lock();
 	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
-		/* Could not gather all CPUs */
+		/* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
 		ret = 0;
 		cpumask_clear(&nmi_ipi_pending_mask);
 	}
+	if (nmi_ipi_busy_count > 1) {
+		/* Timeout waiting for CPUs to execute fn */
+		ret = 0;
+		nmi_ipi_busy_count = 1;
+	}
+
 	nmi_ipi_busy_count--;
 	nmi_ipi_unlock_end(&flags);
 
@@ -597,7 +600,8 @@ static void nmi_stop_this_cpu(struct pt_regs *regs)
 	 * IRQs are already hard disabled by the smp_handle_nmi_ipi.
 	 */
 	nmi_ipi_lock();
-	nmi_ipi_busy_count--;
+	if (nmi_ipi_busy_count > 1)
+		nmi_ipi_busy_count--;
 	nmi_ipi_unlock();
 
 	spin_begin();
@@ -1156,6 +1160,11 @@ void __init smp_cpus_done(unsigned int max_cpus)
 	if (smp_ops && smp_ops->bringup_done)
 		smp_ops->bringup_done();
 
+	/*
+	 * On a shared LPAR, associativity needs to be requested.
+	 * Hence, get numa topology before dumping cpu topology
+	 */
+	shared_proc_topology_init();
 	dump_numa_cpu_topology();
 
 	/*
```
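The nmi_ipi hunks above all protect one invariant: `nmi_ipi_busy_count` is 1 while the caller in `__smp_send_nmi_ipi()` owns the operation, and each target bumps it while executing `fn()`. A caller time-out can therefore race with a late handler, and with unconditional decrements on both sides the counter could underflow. The stand-alone userspace sketch below models that handshake with the same guards the patch adds; it is an illustration only, and every name in it (`model_lock`, `busy_count`, `pending`, `handler`) is invented for the model rather than taken from the kernel.

```c
/*
 * Userspace model of the nmi_ipi_busy_count handshake (illustrative only).
 * Build: cc -pthread model.c
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t model_lock = PTHREAD_MUTEX_INITIALIZER;
static int busy_count;  /* models nmi_ipi_busy_count: 1 = caller busy, +1 per handler in fn() */
static int pending = 1; /* models this CPU's bit in nmi_ipi_pending_mask */

/* Target side: models smp_handle_nmi_ipi() whose fn() outlives the caller's wait. */
static void *handler(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&model_lock);
	if (!busy_count) {          /* caller already gave up: treat as spurious */
		pthread_mutex_unlock(&model_lock);
		return NULL;
	}
	pending = 0;
	busy_count++;               /* announce "executing fn" */
	pthread_mutex_unlock(&model_lock);

	usleep(1000000);            /* fn(regs) runs far longer than the caller waits */

	pthread_mutex_lock(&model_lock);
	if (busy_count > 1)         /* the patched guard: can race with caller time-out */
		busy_count--;
	pthread_mutex_unlock(&model_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	int delay_us = 1000;

	busy_count = 1;             /* caller takes ownership, as __smp_send_nmi_ipi() does */
	pthread_create(&t, NULL, handler, NULL);

	/* Caller side: both wait loops from the patch, folded into one for brevity. */
	pthread_mutex_lock(&model_lock);
	while (pending || busy_count > 1) {
		pthread_mutex_unlock(&model_lock);
		usleep(1);
		pthread_mutex_lock(&model_lock);
		if (delay_us && !--delay_us)
			break;          /* timed out */
	}
	if (busy_count > 1)         /* timed out: clamp so the final decrement */
		busy_count = 1;     /* cannot be repeated by the late handler */
	busy_count--;
	pthread_mutex_unlock(&model_lock);

	pthread_join(t, NULL);
	printf("busy_count = %d (stays >= 0 thanks to the guards)\n", busy_count);
	return 0;
}
```

Removing the two `busy_count > 1` guards and the clamp reproduces the pre-patch failure mode: when `fn()` outlives the caller's `delay_us` budget, both sides decrement and the counter ends at -1.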