author		Sebastian Andrzej Siewior <[email protected]>	2019-04-03 18:41:30 +0200
committer	Borislav Petkov <[email protected]>	2019-04-09 19:27:29 +0200
commit		39ea9baffda91df8bfee9b45610242a3191ea1ec (patch)
tree		5a5a250a43b14094afd5920d286ede9a3c7b90a1 /arch/x86/kernel/fpu/signal.c
parent		89833fab15d6017ba006a45b5af68caa067171a7 (diff)
x86/fpu: Remove fpu->initialized usage in __fpu__restore_sig()
This is a preparation for the removal of the ->initialized member in the
fpu struct.

__fpu__restore_sig() is deactivating the FPU via fpu__drop() and then
setting manually ->initialized followed by fpu__restore(). The result is
that it is possible to manipulate fpu->state and the state of registers
won't be saved/restored on a context switch which would overwrite
fpu->state:

  fpu__drop(fpu):
    ...
    fpu->initialized = 0;
    preempt_enable();

  <--- context switch

Don't access the fpu->state while the content is read from user space and
examined/sanitized. Use a temporary kmalloc() buffer for the preparation
of the FPU registers and once the state is considered okay, load it.
Should something go wrong, return with an error and without altering the
original FPU registers.

The removal of fpu__initialize() is a nop because fpu->initialized is
already set for the user task.

  [ bp: Massage a bit. ]

Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
Reviewed-by: Dave Hansen <[email protected]>
Reviewed-by: Thomas Gleixner <[email protected]>
Acked-by: Borislav Petkov <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Jann Horn <[email protected]>
Cc: "Jason A. Donenfeld" <[email protected]>
Cc: kvm ML <[email protected]>
Cc: Paolo Bonzini <[email protected]>
Cc: Radim Krčmář <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: x86-ml <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
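For orientation before the diff: the fix amounts to staging the untrusted signal-frame state in a 64-byte-aligned scratch buffer, validating and sanitizing it there, and loading it into the live registers only if everything checks out, so a failure never leaves half-written state behind. Below is a minimal user-space sketch of that pattern, not the kernel code itself; restore_from_user_copy(), validate(), commit() and struct xstate are hypothetical stand-ins for the helpers seen in the hunks that follow.

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-ins for the kernel pieces used in the hunks below. */
struct xstate { unsigned char bytes[832]; };	/* fixed-size stand-in for the xstate area */

static int validate(const struct xstate *s)	/* plays the role of validate_xstate_header() */
{
	return s->bytes[0] ? 0 : -EINVAL;	/* dummy check for illustration only */
}

static void commit(const struct xstate *s)	/* plays the role of copy_kernel_to_fpregs() */
{
	(void)s;	/* would load the sanitized state into the registers */
}

/* Stage the untrusted copy in an aligned scratch buffer; commit only on success. */
static int restore_from_user_copy(const void *ubuf, size_t size)
{
	struct xstate *tmp;
	int err;

	if (size < sizeof(*tmp))
		return -EINVAL;

	/*
	 * XSAVE areas want 64-byte alignment; aligned_alloc() is the user-space
	 * analogue of the kzalloc() + PTR_ALIGN(tmp, 64) pair in the kernel hunk.
	 */
	tmp = aligned_alloc(64, sizeof(*tmp));
	if (!tmp)
		return -ENOMEM;

	memcpy(tmp, ubuf, sizeof(*tmp));	/* __copy_from_user() in the kernel */
	err = validate(tmp);
	if (!err)
		commit(tmp);

	free(tmp);				/* kfree(tmp) */
	return err;				/* on error, the live state was never touched */
}

The property this sketch mirrors is the one the commit message describes: the task's fpu->state and the registers are only touched after validation succeeds, and on any error the function returns without side effects.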
Diffstat (limited to 'arch/x86/kernel/fpu/signal.c')
-rw-r--r--	arch/x86/kernel/fpu/signal.c | 40
1 file changed, 15 insertions(+), 25 deletions(-)
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 55b80de13ea5..7296a9bb78e7 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -207,11 +207,11 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
 }
 
 static inline void
-sanitize_restored_xstate(struct task_struct *tsk,
+sanitize_restored_xstate(union fpregs_state *state,
			 struct user_i387_ia32_struct *ia32_env,
			 u64 xfeatures, int fx_only)
 {
-	struct xregs_state *xsave = &tsk->thread.fpu.state.xsave;
+	struct xregs_state *xsave = &state->xsave;
 	struct xstate_header *header = &xsave->header;
 
 	if (use_xsave()) {
@@ -238,7 +238,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
 		 */
 		xsave->i387.mxcsr &= mxcsr_feature_mask;
 
-		convert_to_fxsr(tsk, ia32_env);
+		convert_to_fxsr(&state->fxsave, ia32_env);
 	}
 }
@@ -284,8 +284,6 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
 	if (!access_ok(buf, size))
 		return -EACCES;
 
-	fpu__initialize(fpu);
-
 	if (!static_cpu_has(X86_FEATURE_FPU))
 		return fpregs_soft_set(current, NULL,
 				       0, sizeof(struct user_i387_ia32_struct),
@@ -315,40 +313,32 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
 		 * header. Validate and sanitize the copied state.
 		 */
 		struct user_i387_ia32_struct env;
+		union fpregs_state *state;
 		int err = 0;
+		void *tmp;
 
-		/*
-		 * Drop the current fpu which clears fpu->initialized. This ensures
-		 * that any context-switch during the copy of the new state,
-		 * avoids the intermediate state from getting restored/saved.
-		 * Thus avoiding the new restored state from getting corrupted.
-		 * We will be ready to restore/save the state only after
-		 * fpu->initialized is again set.
-		 */
-		fpu__drop(fpu);
+		tmp = kzalloc(sizeof(*state) + fpu_kernel_xstate_size + 64, GFP_KERNEL);
+		if (!tmp)
+			return -ENOMEM;
+		state = PTR_ALIGN(tmp, 64);
 
 		if (using_compacted_format()) {
-			err = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
+			err = copy_user_to_xstate(&state->xsave, buf_fx);
 		} else {
-			err = __copy_from_user(&fpu->state.xsave, buf_fx, state_size);
+			err = __copy_from_user(&state->xsave, buf_fx, state_size);
 
 			if (!err && state_size > offsetof(struct xregs_state, header))
-				err = validate_xstate_header(&fpu->state.xsave.header);
+				err = validate_xstate_header(&state->xsave.header);
 		}
 
 		if (err || __copy_from_user(&env, buf, sizeof(env))) {
-			fpstate_init(&fpu->state);
-			trace_x86_fpu_init_state(fpu);
 			err = -1;
 		} else {
-			sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
+			sanitize_restored_xstate(state, &env, xfeatures, fx_only);
+			copy_kernel_to_fpregs(state);
 		}
 
-		local_bh_disable();
-		fpu->initialized = 1;
-		fpu__restore(fpu);
-		local_bh_enable();
-
+		kfree(tmp);
 		return err;
 	} else {
 		/*