-rw-r--r-- | arch/x86/kernel/cpu/microcode/core.c | 84
1 files changed, 84 insertions, 0 deletions
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 001c0048e16a..1ff38f9fdb8a 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -290,6 +290,90 @@ static bool wait_for_cpus(atomic_t *cnt)
 	return false;
 }
 
+static bool wait_for_ctrl(void)
+{
+	unsigned int timeout;
+
+	for (timeout = 0; timeout < USEC_PER_SEC; timeout++) {
+		if (this_cpu_read(ucode_ctrl.ctrl) != SCTRL_WAIT)
+			return true;
+		udelay(1);
+		if (!(timeout % 1000))
+			touch_nmi_watchdog();
+	}
+	return false;
+}
+
+static __maybe_unused void load_secondary(unsigned int cpu)
+{
+	unsigned int ctrl_cpu = this_cpu_read(ucode_ctrl.ctrl_cpu);
+	enum ucode_state ret;
+
+	/* Initial rendezvous to ensure that all CPUs have arrived */
+	if (!wait_for_cpus(&late_cpus_in)) {
+		pr_err_once("load: %d CPUs timed out\n", atomic_read(&late_cpus_in) - 1);
+		this_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT);
+		return;
+	}
+
+	/*
+	 * Wait for primary threads to complete. If one of them hangs due
+	 * to the update, there is no way out. This is non-recoverable
+	 * because the CPU might hold locks or resources and confuse the
+	 * scheduler, watchdogs etc. There is no way to safely evacuate the
+	 * machine.
+	 */
+	if (!wait_for_ctrl())
+		panic("Microcode load: Primary CPU %d timed out\n", ctrl_cpu);
+
+	/*
+	 * If the primary succeeded then invoke the apply() callback,
+	 * otherwise copy the state from the primary thread.
+	 */
+	if (this_cpu_read(ucode_ctrl.ctrl) == SCTRL_APPLY)
+		ret = microcode_ops->apply_microcode(cpu);
+	else
+		ret = per_cpu(ucode_ctrl.result, ctrl_cpu);
+
+	this_cpu_write(ucode_ctrl.result, ret);
+	this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE);
+}
+
+static __maybe_unused void load_primary(unsigned int cpu)
+{
+	struct cpumask *secondaries = topology_sibling_cpumask(cpu);
+	enum sibling_ctrl ctrl;
+	enum ucode_state ret;
+	unsigned int sibling;
+
+	/* Initial rendezvous to ensure that all CPUs have arrived */
+	if (!wait_for_cpus(&late_cpus_in)) {
+		this_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT);
+		pr_err_once("load: %d CPUs timed out\n", atomic_read(&late_cpus_in) - 1);
+		return;
+	}
+
+	ret = microcode_ops->apply_microcode(cpu);
+	this_cpu_write(ucode_ctrl.result, ret);
+	this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE);
+
+	/*
+	 * If the update was successful, let the siblings run the apply()
+	 * callback. If not, tell them it's done. This also covers the
+	 * case where the CPU has uniform loading at package or system
+	 * scope implemented but does not advertise it.
+	 */
+	if (ret == UCODE_UPDATED || ret == UCODE_OK)
+		ctrl = SCTRL_APPLY;
+	else
+		ctrl = SCTRL_DONE;
+
+	for_each_cpu(sibling, secondaries) {
+		if (sibling != cpu)
+			per_cpu(ucode_ctrl.ctrl, sibling) = ctrl;
+	}
+}
+
 static int load_cpus_stopped(void *unused)
 {
 	int cpu = smp_processor_id();
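
For orientation (not part of the patch): the following is a minimal userspace sketch of the SCTRL_WAIT/SCTRL_APPLY/SCTRL_DONE handshake that load_primary() and load_secondary() implement above. It stands in plain C11 atomics and pthreads for the kernel's per-CPU control data and stop_machine() context, and it omits the timeout/panic handling; NR_SIBLINGS, worker() and the thread layout are illustrative assumptions, not kernel interfaces. Build with the -pthread flag.

/*
 * Illustrative sketch only: models the primary/secondary control handshake
 * from the patch with threads and atomics instead of per-CPU data under
 * stop_machine(). Names below are invented for the example.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

enum sibling_ctrl { SCTRL_WAIT, SCTRL_APPLY, SCTRL_DONE };

#define NR_SIBLINGS	4

/* One control word per "CPU", all starting in SCTRL_WAIT. */
static _Atomic enum sibling_ctrl ctrl[NR_SIBLINGS];

/* Primary: perform the "update", then hand each sibling a verdict. */
static void primary(int cpu)
{
	int ok = 1;	/* stands in for apply_microcode() succeeding */

	for (int s = 0; s < NR_SIBLINGS; s++) {
		if (s != cpu)
			atomic_store(&ctrl[s], ok ? SCTRL_APPLY : SCTRL_DONE);
	}
}

/* Secondary: spin until the primary releases it, then act on the verdict. */
static void secondary(int cpu)
{
	while (atomic_load(&ctrl[cpu]) == SCTRL_WAIT)
		usleep(1);	/* udelay(1) in the kernel version */

	if (atomic_load(&ctrl[cpu]) == SCTRL_APPLY)
		printf("cpu %d: run apply()\n", cpu);
	else
		printf("cpu %d: done, inherit primary result\n", cpu);
}

static void *worker(void *arg)
{
	int cpu = (int)(long)arg;

	if (cpu == 0)
		primary(cpu);
	else
		secondary(cpu);
	return NULL;
}

int main(void)
{
	pthread_t t[NR_SIBLINGS];

	for (int i = 0; i < NR_SIBLINGS; i++)
		atomic_store(&ctrl[i], SCTRL_WAIT);
	for (int i = 0; i < NR_SIBLINGS; i++)
		pthread_create(&t[i], NULL, worker, (void *)(long)i);
	for (int i = 0; i < NR_SIBLINGS; i++)
		pthread_join(t[i], NULL);
	return 0;
}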