author		Sebastian Andrzej Siewior <[email protected]>	2019-04-03 18:41:36 +0200
committer	Borislav Petkov <[email protected]>	2019-04-10 15:42:40 +0200
commit		2722146eb78451b30e4717a267a3a2b44e4ad317 (patch)
tree		9e1182428d9bcb6f6034d156c9a068ccd98f7cdb /arch/x86/kernel/fpu/core.c
parent		39388e80f9b0c3788bfb6efe3054bdce0c3ead45 (diff)
x86/fpu: Remove fpu->initialized
The struct fpu.initialized member is always set to one for user tasks
and zero for kernel tasks. This avoids saving/restoring the FPU
registers for kernel threads.

The ->initialized = 0 case for user tasks has been removed in previous
changes, for instance, by doing an explicit unconditional init at
fork() time for FPU-less systems, which was otherwise delayed until the
emulated opcode.

The context switch code (switch_fpu_prepare() + switch_fpu_finish())
can't unconditionally save/restore registers for kernel threads. Not
only would that slow down the switch, it would also load a zeroed
xcomp_bv for XSAVES.

For kernel_fpu_begin() (+end) the situation is similar: EFI with
runtime services uses this before alternatives_patched is true, which
means the function is now used earlier than it used to be.

For those two cases, use current->mm to distinguish between user and
kernel thread. For kernel_fpu_begin(), skip the save/restore of the FPU
registers.

During the context switch into a kernel thread don't do anything. There
is no reason to save the FPU state of a kernel thread.

The reordering in __switch_to() is important because the current()
pointer needs to be valid before switch_fpu_finish() is invoked, so
that the ->mm of the new task is seen instead of the old one's.

N.B.: fpu__save() doesn't need to check ->mm because it is called by
user tasks only.

[ bp: Massage. ]

Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
Reviewed-by: Dave Hansen <[email protected]>
Reviewed-by: Thomas Gleixner <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Aubrey Li <[email protected]>
Cc: Babu Moger <[email protected]>
Cc: "Chang S. Bae" <[email protected]>
Cc: Dmitry Safonov <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Jann Horn <[email protected]>
Cc: "Jason A. Donenfeld" <[email protected]>
Cc: Joerg Roedel <[email protected]>
Cc: kvm ML <[email protected]>
Cc: Masami Hiramatsu <[email protected]>
Cc: Mathieu Desnoyers <[email protected]>
Cc: Nicolai Stange <[email protected]>
Cc: Paolo Bonzini <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Radim Krčmář <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Sergey Senozhatsky <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: x86-ml <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
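The distinction relies on kernel threads never owning a userspace
address space: for them current->mm is NULL, while user tasks always
have a non-NULL ->mm. A minimal sketch of that test as the patch
applies it (illustrative only; the helper name is hypothetical and not
part of the patch):

	#include <linux/sched.h>	/* current, struct task_struct */

	/*
	 * Hypothetical helper, for illustration: kernel threads run with
	 * current->mm == NULL, so a non-NULL ->mm identifies a user task
	 * whose FPU registers must be preserved around kernel FPU usage.
	 */
	static inline bool fpu_regs_belong_to_user_task(void)
	{
		return current->mm != NULL;
	}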
Diffstat (limited to 'arch/x86/kernel/fpu/core.c')
-rw-r--r--	arch/x86/kernel/fpu/core.c	70
1 file changed, 20 insertions(+), 50 deletions(-)
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index e43296854e37..97e27de2b7c0 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -101,7 +101,7 @@ static void __kernel_fpu_begin(void)
 
 	kernel_fpu_disable();
 
-	if (fpu->initialized) {
+	if (current->mm) {
 		/*
 		 * Ignore return value -- we don't care if reg state
 		 * is clobbered.
@@ -116,7 +116,7 @@ static void __kernel_fpu_end(void)
 {
 	struct fpu *fpu = &current->thread.fpu;
 
-	if (fpu->initialized)
+	if (current->mm)
 		copy_kernel_to_fpregs(&fpu->state);
 
 	kernel_fpu_enable();
@@ -147,11 +147,10 @@ void fpu__save(struct fpu *fpu)
 
 	preempt_disable();
 	trace_x86_fpu_before_save(fpu);
-	if (fpu->initialized) {
-		if (!copy_fpregs_to_fpstate(fpu)) {
-			copy_kernel_to_fpregs(&fpu->state);
-		}
-	}
+
+	if (!copy_fpregs_to_fpstate(fpu))
+		copy_kernel_to_fpregs(&fpu->state);
+
 	trace_x86_fpu_after_save(fpu);
 	preempt_enable();
 }
@@ -190,7 +189,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 {
 	dst_fpu->last_cpu = -1;
 
-	if (!src_fpu->initialized || !static_cpu_has(X86_FEATURE_FPU))
+	if (!static_cpu_has(X86_FEATURE_FPU))
 		return 0;
 
 	WARN_ON_FPU(src_fpu != &current->thread.fpu);
@@ -227,14 +226,10 @@ static void fpu__initialize(struct fpu *fpu)
 {
 	WARN_ON_FPU(fpu != &current->thread.fpu);
 
-	if (!fpu->initialized) {
-		fpstate_init(&fpu->state);
-		trace_x86_fpu_init_state(fpu);
+	fpstate_init(&fpu->state);
+	trace_x86_fpu_init_state(fpu);
 
-		trace_x86_fpu_activate_state(fpu);
-		/* Safe to do for the current task: */
-		fpu->initialized = 1;
-	}
+	trace_x86_fpu_activate_state(fpu);
 }
 
 /*
@@ -247,32 +242,20 @@ static void fpu__initialize(struct fpu *fpu)
  *
  * - or it's called for stopped tasks (ptrace), in which case the
  *   registers were already saved by the context-switch code when
- *   the task scheduled out - we only have to initialize the registers
- *   if they've never been initialized.
+ *   the task scheduled out.
  *
  * If the task has used the FPU before then save it.
  */
 void fpu__prepare_read(struct fpu *fpu)
 {
-	if (fpu == &current->thread.fpu) {
+	if (fpu == &current->thread.fpu)
 		fpu__save(fpu);
-	} else {
-		if (!fpu->initialized) {
-			fpstate_init(&fpu->state);
-			trace_x86_fpu_init_state(fpu);
-
-			trace_x86_fpu_activate_state(fpu);
-			/* Safe to do for current and for stopped child tasks: */
-			fpu->initialized = 1;
-		}
-	}
 }
 
 /*
  * This function must be called before we write a task's fpstate.
  *
- * If the task has used the FPU before then invalidate any cached FPU registers.
- * If the task has not used the FPU before then initialize its fpstate.
+ * Invalidate any cached FPU registers.
  *
  * After this function call, after registers in the fpstate are
  * modified and the child task has woken up, the child task will
@@ -289,17 +272,8 @@ void fpu__prepare_write(struct fpu *fpu)
 	 */
 	WARN_ON_FPU(fpu == &current->thread.fpu);
 
-	if (fpu->initialized) {
-		/* Invalidate any cached state: */
-		__fpu_invalidate_fpregs_state(fpu);
-	} else {
-		fpstate_init(&fpu->state);
-		trace_x86_fpu_init_state(fpu);
-
-		trace_x86_fpu_activate_state(fpu);
-		/* Safe to do for stopped child tasks: */
-		fpu->initialized = 1;
-	}
+	/* Invalidate any cached state: */
+	__fpu_invalidate_fpregs_state(fpu);
 }
 
 /*
@@ -316,17 +290,13 @@ void fpu__drop(struct fpu *fpu)
 	preempt_disable();
 
 	if (fpu == &current->thread.fpu) {
-		if (fpu->initialized) {
-			/* Ignore delayed exceptions from user space */
-			asm volatile("1: fwait\n"
-				     "2:\n"
-				     _ASM_EXTABLE(1b, 2b));
-			fpregs_deactivate(fpu);
-		}
+		/* Ignore delayed exceptions from user space */
+		asm volatile("1: fwait\n"
+			     "2:\n"
+			     _ASM_EXTABLE(1b, 2b));
+		fpregs_deactivate(fpu);
 	}
 
-	fpu->initialized = 0;
-
 	trace_x86_fpu_dropped(fpu);
 
 	preempt_enable();
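The __switch_to() reordering referenced in the commit message lives in
arch/x86/kernel/process_64.c and is not part of this file's diff. A
schematic of the ordering constraint (a heavily elided sketch based on
the commit message, not the verbatim kernel code):

	/* Inside __switch_to(), schematically (surrounding code elided): */

	/* Save the outgoing task's FPU registers (user tasks only). */
	switch_fpu_prepare(&prev_p->thread.fpu, cpu);

	/* From here on, current() resolves to the incoming task ... */
	this_cpu_write(current_task, next_p);

	/* ... so switch_fpu_finish() observes the new task's ->mm. */
	switch_fpu_finish(&next_p->thread.fpu, cpu);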