diff options
Diffstat (limited to 'arch/x86/include/asm/fpu')
 arch/x86/include/asm/fpu/api.h      |  1 +
 arch/x86/include/asm/fpu/internal.h | 91 +++++++---------------------
 arch/x86/include/asm/fpu/regset.h   |  1 +
 arch/x86/include/asm/fpu/signal.h   |  1 +
 arch/x86/include/asm/fpu/types.h    | 33 +++-------
 arch/x86/include/asm/fpu/xstate.h   | 13 ++--
 6 files changed, 42 insertions(+), 98 deletions(-)
| diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h index 0877ae018fc9..a9caac9d4a72 100644 --- a/arch/x86/include/asm/fpu/api.h +++ b/arch/x86/include/asm/fpu/api.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */  /*   * Copyright (C) 1994 Linus Torvalds   * diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h index 554cdb205d17..a38bf5a1e37a 100644 --- a/arch/x86/include/asm/fpu/internal.h +++ b/arch/x86/include/asm/fpu/internal.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */  /*   * Copyright (C) 1994 Linus Torvalds   * @@ -23,11 +24,9 @@  /*   * High level FPU state handling functions:   */ -extern void fpu__activate_curr(struct fpu *fpu); -extern void fpu__activate_fpstate_read(struct fpu *fpu); -extern void fpu__activate_fpstate_write(struct fpu *fpu); -extern void fpu__current_fpstate_write_begin(void); -extern void fpu__current_fpstate_write_end(void); +extern void fpu__initialize(struct fpu *fpu); +extern void fpu__prepare_read(struct fpu *fpu); +extern void fpu__prepare_write(struct fpu *fpu);  extern void fpu__save(struct fpu *fpu);  extern void fpu__restore(struct fpu *fpu);  extern int  fpu__restore_sig(void __user *buf, int ia32_frame); @@ -120,20 +119,11 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);  	err;								\  }) -#define check_insn(insn, output, input...)				\ -({									\ -	int err;							\ +#define kernel_insn(insn, output, input...)				
\  	asm volatile("1:" #insn "\n\t"					\  		     "2:\n"						\ -		     ".section .fixup,\"ax\"\n"				\ -		     "3:  movl $-1,%[err]\n"				\ -		     "    jmp  2b\n"					\ -		     ".previous\n"					\ -		     _ASM_EXTABLE(1b, 3b)				\ -		     : [err] "=r" (err), output				\ -		     : "0"(0), input);					\ -	err;								\ -}) +		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_fprestore)	\ +		     : output : input)  static inline int copy_fregs_to_user(struct fregs_state __user *fx)  { @@ -153,20 +143,16 @@ static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)  static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)  { -	int err; -  	if (IS_ENABLED(CONFIG_X86_32)) { -		err = check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx)); +		kernel_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));  	} else {  		if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) { -			err = check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx)); +			kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));  		} else {  			/* See comment in copy_fxregs_to_kernel() below. */ -			err = check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx)); +			kernel_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx));  		}  	} -	/* Copying from a kernel buffer to FPU registers should never fail: */ -	WARN_ON_FPU(err);  }  static inline int copy_user_to_fxregs(struct fxregs_state __user *fx) @@ -183,9 +169,7 @@ static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)  static inline void copy_kernel_to_fregs(struct fregs_state *fx)  { -	int err = check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx)); - -	WARN_ON_FPU(err); +	kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));  }  static inline int copy_user_to_fregs(struct fregs_state __user *fx) @@ -281,18 +265,13 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)   * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact   * XSAVE area format.   
*/ -#define XSTATE_XRESTORE(st, lmask, hmask, err)				\ +#define XSTATE_XRESTORE(st, lmask, hmask)				\  	asm volatile(ALTERNATIVE(XRSTOR,				\  				 XRSTORS, X86_FEATURE_XSAVES)		\  		     "\n"						\ -		     "xor %[err], %[err]\n"				\  		     "3:\n"						\ -		     ".pushsection .fixup,\"ax\"\n"			\ -		     "4: movl $-2, %[err]\n"				\ -		     "jmp 3b\n"						\ -		     ".popsection\n"					\ -		     _ASM_EXTABLE(661b, 4b)				\ -		     : [err] "=r" (err)					\ +		     _ASM_EXTABLE_HANDLE(661b, 3b, ex_handler_fprestore)\ +		     :							\  		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\  		     : "memory") @@ -336,7 +315,10 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)  	else  		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err); -	/* We should never fault when copying from a kernel buffer: */ +	/* +	 * We should never fault when copying from a kernel buffer, and the FPU +	 * state we set at boot time should be valid. +	 */  	WARN_ON_FPU(err);  } @@ -350,7 +332,7 @@ static inline void copy_xregs_to_kernel(struct xregs_state *xstate)  	u32 hmask = mask >> 32;  	int err; -	WARN_ON(!alternatives_patched); +	WARN_ON_FPU(!alternatives_patched);  	XSTATE_XSAVE(xstate, lmask, hmask, err); @@ -365,12 +347,8 @@ static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)  {  	u32 lmask = mask;  	u32 hmask = mask >> 32; -	int err; - -	XSTATE_XRESTORE(xstate, lmask, hmask, err); -	/* We should never fault when copying from a kernel buffer: */ -	WARN_ON_FPU(err); +	XSTATE_XRESTORE(xstate, lmask, hmask);  }  /* @@ -526,38 +504,17 @@ static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)   */  static inline void fpregs_deactivate(struct fpu *fpu)  { -	WARN_ON_FPU(!fpu->fpregs_active); - -	fpu->fpregs_active = 0;  	this_cpu_write(fpu_fpregs_owner_ctx, NULL);  	trace_x86_fpu_regs_deactivated(fpu);  }  static inline void fpregs_activate(struct fpu *fpu)  { -	WARN_ON_FPU(fpu->fpregs_active); - -	fpu->fpregs_active 
= 1;  	this_cpu_write(fpu_fpregs_owner_ctx, fpu);  	trace_x86_fpu_regs_activated(fpu);  }  /* - * The question "does this thread have fpu access?" - * is slightly racy, since preemption could come in - * and revoke it immediately after the test. - * - * However, even in that very unlikely scenario, - * we can just assume we have FPU access - typically - * to save the FP state - we'll just take a #NM - * fault and get the FPU access back. - */ -static inline int fpregs_active(void) -{ -	return current->thread.fpu.fpregs_active; -} - -/*   * FPU state switching for scheduling.   *   * This is a two-stage process: @@ -571,14 +528,13 @@ static inline int fpregs_active(void)  static inline void  switch_fpu_prepare(struct fpu *old_fpu, int cpu)  { -	if (old_fpu->fpregs_active) { +	if (old_fpu->initialized) {  		if (!copy_fpregs_to_fpstate(old_fpu))  			old_fpu->last_cpu = -1;  		else  			old_fpu->last_cpu = cpu;  		/* But leave fpu_fpregs_owner_ctx! */ -		old_fpu->fpregs_active = 0;  		trace_x86_fpu_regs_deactivated(old_fpu);  	} else  		old_fpu->last_cpu = -1; @@ -595,7 +551,7 @@ switch_fpu_prepare(struct fpu *old_fpu, int cpu)  static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)  {  	bool preload = static_cpu_has(X86_FEATURE_FPU) && -		       new_fpu->fpstate_active; +		       new_fpu->initialized;  	if (preload) {  		if (!fpregs_state_valid(new_fpu, cpu)) @@ -617,8 +573,7 @@ static inline void user_fpu_begin(void)  	struct fpu *fpu = ¤t->thread.fpu;  	preempt_disable(); -	if (!fpregs_active()) -		fpregs_activate(fpu); +	fpregs_activate(fpu);  	preempt_enable();  } diff --git a/arch/x86/include/asm/fpu/regset.h b/arch/x86/include/asm/fpu/regset.h index 39d3107ac6c7..d5bdffb9d27f 100644 --- a/arch/x86/include/asm/fpu/regset.h +++ b/arch/x86/include/asm/fpu/regset.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */  /*   * FPU regset handling methods:   */ diff --git a/arch/x86/include/asm/fpu/signal.h b/arch/x86/include/asm/fpu/signal.h index 
20a1fbf7fe4e..4df2754ef380 100644 --- a/arch/x86/include/asm/fpu/signal.h +++ b/arch/x86/include/asm/fpu/signal.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */  /*   * x86 FPU signal frame handling methods:   */ diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h index 3c80f5b9c09d..202c53918ecf 100644 --- a/arch/x86/include/asm/fpu/types.h +++ b/arch/x86/include/asm/fpu/types.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */  /*   * FPU data structures:   */ @@ -68,6 +69,9 @@ struct fxregs_state {  /* Default value for fxregs_state.mxcsr: */  #define MXCSR_DEFAULT		0x1f80 +/* Copy both mxcsr & mxcsr_flags with a single u64 memcpy: */ +#define MXCSR_AND_FLAGS_SIZE sizeof(u64) +  /*   * Software based FPU emulation state. This is arbitrary really,   * it matches the x87 format to make it easier to understand: @@ -290,36 +294,13 @@ struct fpu {  	unsigned int			last_cpu;  	/* -	 * @fpstate_active: +	 * @initialized:  	 * -	 * This flag indicates whether this context is active: if the task +	 * This flag indicates whether this context is initialized: if the task  	 * is not running then we can restore from this context, if the task  	 * is running then we should save into this context.  	 */ -	unsigned char			fpstate_active; - -	/* -	 * @fpregs_active: -	 * -	 * This flag determines whether a given context is actively -	 * loaded into the FPU's registers and that those registers -	 * represent the task's current FPU state. -	 * -	 * Note the interaction with fpstate_active: -	 * -	 *   # task does not use the FPU: -	 *   fpstate_active == 0 -	 * -	 *   # task uses the FPU and regs are active: -	 *   fpstate_active == 1 && fpregs_active == 1 -	 * -	 *   # the regs are inactive but still match fpstate: -	 *   fpstate_active == 1 && fpregs_active == 0 && fpregs_owner == fpu -	 * -	 * The third state is what we use for the lazy restore optimization -	 * on lazy-switching CPUs. 
-	 */ -	unsigned char			fpregs_active; +	unsigned char			initialized;  	/*  	 * @state: diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h index 1b2799e0699a..48581988d78c 100644 --- a/arch/x86/include/asm/fpu/xstate.h +++ b/arch/x86/include/asm/fpu/xstate.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */  #ifndef __ASM_X86_XSAVE_H  #define __ASM_X86_XSAVE_H @@ -48,8 +49,12 @@ void fpu__xstate_clear_all_cpu_caps(void);  void *get_xsave_addr(struct xregs_state *xsave, int xstate);  const void *get_xsave_field_ptr(int xstate_field);  int using_compacted_format(void); -int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf, -			void __user *ubuf, struct xregs_state *xsave); -int copyin_to_xsaves(const void *kbuf, const void __user *ubuf, -		     struct xregs_state *xsave); +int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset, unsigned int size); +int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int offset, unsigned int size); +int copy_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf); +int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf); + +/* Validate an xstate header supplied by userspace (ptrace or sigreturn) */ +extern int validate_xstate_header(const struct xstate_header *hdr); +  #endif |