Diffstat (limited to 'arch/arm64/include/asm/uaccess.h')
-rw-r--r--  arch/arm64/include/asm/uaccess.h | 193
1 files changed, 140 insertions, 53 deletions
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index fc0f9eb66039..e66b0fca99c2 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -35,16 +35,20 @@
 #include <asm/compiler.h>
 #include <asm/extable.h>
 
-#define KERNEL_DS	(-1UL)
 #define get_ds()	(KERNEL_DS)
-
-#define USER_DS		TASK_SIZE_64
 #define get_fs()	(current_thread_info()->addr_limit)
 
 static inline void set_fs(mm_segment_t fs)
 {
 	current_thread_info()->addr_limit = fs;
 
+	/*
+	 * Prevent a mispredicted conditional call to set_fs from forwarding
+	 * the wrong address limit to access_ok under speculation.
+	 */
+	dsb(nsh);
+	isb();
+
 	/* On user-mode return, check fs is correct */
 	set_thread_flag(TIF_FSCHECK);
 }
@@ -66,22 +70,32 @@ static inline void set_fs(mm_segment_t fs)
  * Returns 1 if the range is valid, 0 otherwise.
  *
  * This is equivalent to the following test:
- * (u65)addr + (u65)size <= current->addr_limit
- *
- * This needs 65-bit arithmetic.
+ * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
  */
-#define __range_ok(addr, size)						\
-({									\
-	unsigned long __addr = (unsigned long)(addr);			\
-	unsigned long flag, roksum;					\
-	__chk_user_ptr(addr);						\
-	asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls"		\
-		: "=&r" (flag), "=&r" (roksum)				\
-		: "1" (__addr), "Ir" (size),				\
-		  "r" (current_thread_info()->addr_limit)		\
-		: "cc");						\
-	flag;								\
-})
+static inline unsigned long __range_ok(const void __user *addr, unsigned long size)
+{
+	unsigned long ret, limit = current_thread_info()->addr_limit;
+
+	__chk_user_ptr(addr);
+	asm volatile(
+	// A + B <= C + 1 for all A,B,C, in four easy steps:
+	// 1: X = A + B; X' = X % 2^64
+	"	adds	%0, %3, %2\n"
+	// 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
+	"	csel	%1, xzr, %1, hi\n"
+	// 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
+	//    to compensate for the carry flag being set in step 4. For
+	//    X > 2^64, X' merely has to remain nonzero, which it does.
+	"	csinv	%0, %0, xzr, cc\n"
+	// 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
+	//    comes from the carry in being clear. Otherwise, we are
+	//    testing X' - C == 0, subject to the previous adjustments.
+	"	sbcs	xzr, %0, %1\n"
+	"	cset	%0, ls\n"
+	: "=&r" (ret), "+r" (limit) : "Ir" (size), "0" (addr) : "cc");
+
+	return ret;
+}
 
 /*
  * When dealing with data aborts, watchpoints, or instruction traps we may end
@@ -105,17 +119,23 @@ static inline void set_fs(mm_segment_t fs)
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
 static inline void __uaccess_ttbr0_disable(void)
 {
-	unsigned long ttbr;
+	unsigned long flags, ttbr;
 
-	/* reserved_ttbr0 placed at the end of swapper_pg_dir */
-	ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE;
-	write_sysreg(ttbr, ttbr0_el1);
+	local_irq_save(flags);
+	ttbr = read_sysreg(ttbr1_el1);
+	ttbr &= ~TTBR_ASID_MASK;
+	/* reserved_ttbr0 placed before swapper_pg_dir */
+	write_sysreg(ttbr - RESERVED_TTBR0_SIZE, ttbr0_el1);
+	isb();
+	/* Set reserved ASID */
+	write_sysreg(ttbr, ttbr1_el1);
 	isb();
+	local_irq_restore(flags);
 }
 
 static inline void __uaccess_ttbr0_enable(void)
 {
-	unsigned long flags;
+	unsigned long flags, ttbr0, ttbr1;
 
 	/*
 	 * Disable interrupts to avoid preemption between reading the 'ttbr0'
@@ -123,7 +143,17 @@ static inline void __uaccess_ttbr0_enable(void)
 	 * roll-over and an update of 'ttbr0'.
 	 */
 	local_irq_save(flags);
-	write_sysreg(current_thread_info()->ttbr0, ttbr0_el1);
+	ttbr0 = READ_ONCE(current_thread_info()->ttbr0);
+
+	/* Restore active ASID */
+	ttbr1 = read_sysreg(ttbr1_el1);
+	ttbr1 &= ~TTBR_ASID_MASK;		/* safety measure */
+	ttbr1 |= ttbr0 & TTBR_ASID_MASK;
+	write_sysreg(ttbr1, ttbr1_el1);
+	isb();
+
+	/* Restore user page table */
+	write_sysreg(ttbr0, ttbr0_el1);
 	isb();
 	local_irq_restore(flags);
 }
@@ -155,6 +185,18 @@ static inline bool uaccess_ttbr0_enable(void)
 }
 #endif
 
+static inline void __uaccess_disable_hw_pan(void)
+{
+	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,
+			CONFIG_ARM64_PAN));
+}
+
+static inline void __uaccess_enable_hw_pan(void)
+{
+	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
+			CONFIG_ARM64_PAN));
+}
+
 #define __uaccess_disable(alt)						\
 do {									\
 	if (!uaccess_ttbr0_disable())					\
@@ -193,6 +235,26 @@ static inline void uaccess_enable_not_uao(void)
 }
 
 /*
+ * Sanitise a uaccess pointer such that it becomes NULL if above the
+ * current addr_limit.
+ */
+#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
+static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
+{
+	void __user *safe_ptr;
+
+	asm volatile(
+	"	bics	xzr, %1, %2\n"
+	"	csel	%0, %1, xzr, eq\n"
+	: "=&r" (safe_ptr)
+	: "r" (ptr), "r" (current_thread_info()->addr_limit)
+	: "cc");
+
+	csdb();
+	return safe_ptr;
+}
+
+/*
  * The "__xxx" versions of the user access functions do not verify the address
  * space - it must have been done previously with a separate "access_ok()"
  * call.
@@ -244,28 +306,33 @@ do {									\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
 } while (0)
 
-#define __get_user(x, ptr)						\
+#define __get_user_check(x, ptr, err)					\
 ({									\
-	int __gu_err = 0;						\
-	__get_user_err((x), (ptr), __gu_err);				\
-	__gu_err;							\
+	__typeof__(*(ptr)) __user *__p = (ptr);				\
+	might_fault();							\
+	if (access_ok(VERIFY_READ, __p, sizeof(*__p))) {		\
+		__p = uaccess_mask_ptr(__p);				\
+		__get_user_err((x), __p, (err));			\
+	} else {							\
+		(x) = 0; (err) = -EFAULT;				\
+	}								\
 })
 
 #define __get_user_error(x, ptr, err)					\
 ({									\
-	__get_user_err((x), (ptr), (err));				\
+	__get_user_check((x), (ptr), (err));				\
 	(void)0;							\
 })
 
-#define get_user(x, ptr)						\
+#define __get_user(x, ptr)						\
 ({									\
-	__typeof__(*(ptr)) __user *__p = (ptr);				\
-	might_fault();							\
-	access_ok(VERIFY_READ, __p, sizeof(*__p)) ?			\
-		__get_user((x), __p) :					\
-		((x) = 0, -EFAULT);					\
+	int __gu_err = 0;						\
+	__get_user_check((x), (ptr), __gu_err);				\
+	__gu_err;							\
 })
 
+#define get_user	__get_user
+
 #define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
 	asm volatile(							\
 	"1:"ALTERNATIVE(instr "     " reg "1, [%2]\n",			\
@@ -308,43 +375,63 @@ do {									\
 	uaccess_disable_not_uao();					\
 } while (0)
 
-#define __put_user(x, ptr)						\
+#define __put_user_check(x, ptr, err)					\
 ({									\
-	int __pu_err = 0;						\
-	__put_user_err((x), (ptr), __pu_err);				\
-	__pu_err;							\
+	__typeof__(*(ptr)) __user *__p = (ptr);				\
+	might_fault();							\
+	if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) {		\
+		__p = uaccess_mask_ptr(__p);				\
+		__put_user_err((x), __p, (err));			\
+	} else	{							\
+		(err) = -EFAULT;					\
+	}								\
 })
 
 #define __put_user_error(x, ptr, err)					\
 ({									\
-	__put_user_err((x), (ptr), (err));				\
+	__put_user_check((x), (ptr), (err));				\
 	(void)0;							\
 })
 
-#define put_user(x, ptr)						\
+#define __put_user(x, ptr)						\
 ({									\
-	__typeof__(*(ptr)) __user *__p = (ptr);				\
-	might_fault();							\
-	access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ?			\
-		__put_user((x), __p) :					\
-		-EFAULT;						\
+	int __pu_err = 0;						\
+	__put_user_check((x), (ptr), __pu_err);				\
+	__pu_err;							\
 })
 
+#define put_user	__put_user
+
 extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
-#define raw_copy_from_user __arch_copy_from_user
+#define raw_copy_from_user(to, from, n)					\
+({									\
+	__arch_copy_from_user((to), __uaccess_mask_ptr(from), (n));	\
+})
+
 extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
-#define raw_copy_to_user __arch_copy_to_user
-extern unsigned long __must_check raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
+#define raw_copy_to_user(to, from, n)					\
+({									\
+	__arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n));	\
+})
+
+extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
+#define raw_copy_in_user(to, from, n)					\
+({									\
+	__arch_copy_in_user(__uaccess_mask_ptr(to),			\
+			    __uaccess_mask_ptr(from), (n));		\
+})
+
 #define INLINE_COPY_TO_USER
 #define INLINE_COPY_FROM_USER
 
-static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
+extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
+static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
 {
 	if (access_ok(VERIFY_WRITE, to, n))
-		n = __clear_user(to, n);
+		n = __arch_clear_user(__uaccess_mask_ptr(to), n);
 	return n;
 }
+#define clear_user	__clear_user
 
 extern long strncpy_from_user(char *dest, const char __user *src, long count);
@@ -358,7 +445,7 @@ extern unsigned long __must_check __copy_user_flushcache(void *to, const void __
 static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
 {
 	kasan_check_write(dst, size);
-	return __copy_user_flushcache(dst, src, size);
+	return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
 }
 #endif
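A note on the new __range_ok(): the adds/csel/csinv/sbcs/cset sequence evaluates the 65-bit comparison (u65)addr + (u65)size <= (u65)addr_limit + 1 using only flag-setting and conditional-select instructions, so there is no conditional branch for the CPU to mispredict. A minimal C sketch of the same check, not part of the patch (the range_ok() helper name is made up here, and unsigned __int128 stands in for the 65-bit arithmetic):

#include <stdbool.h>
#include <stdint.h>

/*
 * Illustrative sketch of the comparison performed by the new
 * __range_ok(), written with 128-bit arithmetic instead of the
 * branch-free adds/csel/csinv/sbcs sequence used in the kernel.
 * The "+ 1" reflects the address limit being an inclusive bound.
 */
static bool range_ok(uint64_t addr, uint64_t size, uint64_t addr_limit)
{
	/* (u65)addr + (u65)size <= (u65)addr_limit + 1 */
	return (unsigned __int128)addr + size <=
	       (unsigned __int128)addr_limit + 1;
}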
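The __uaccess_mask_ptr() helper added above works along the same lines: bics tests whether the pointer has any bits set above the current addr_limit, csel turns it into NULL if so, and csdb() keeps the result from being consumed under speculation. A rough C model, again illustrative only (a compiler may lower the conditional back into a branch, which the kernel's inline asm deliberately avoids) and assuming an address limit of the form 2^n - 1 so the bit test matches a greater-than comparison:

#include <stddef.h>
#include <stdint.h>

/*
 * Illustrative model of __uaccess_mask_ptr(): a pointer above the
 * current address limit collapses to NULL before it reaches the
 * uaccess routines.  "ptr & ~addr_limit" is non-zero exactly when
 * ptr > addr_limit, provided addr_limit is of the form 2^n - 1;
 * this mirrors the bics/csel pair in the inline asm.
 */
static void *mask_user_ptr(void *ptr, uintptr_t addr_limit)
{
	return ((uintptr_t)ptr & ~addr_limit) ? NULL : ptr;
}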