Diffstat (limited to 'include/linux/mm.h')
-rw-r--r-- | include/linux/mm.h | 47
1 files changed, 39 insertions, 8 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 20e6d1dde412..bf5d0b1b16f4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -319,11 +319,13 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_HIGH_ARCH_BIT_2	34	/* bit only usable on 64-bit architectures */
 #define VM_HIGH_ARCH_BIT_3	35	/* bit only usable on 64-bit architectures */
 #define VM_HIGH_ARCH_BIT_4	36	/* bit only usable on 64-bit architectures */
+#define VM_HIGH_ARCH_BIT_5	37	/* bit only usable on 64-bit architectures */
 #define VM_HIGH_ARCH_0	BIT(VM_HIGH_ARCH_BIT_0)
 #define VM_HIGH_ARCH_1	BIT(VM_HIGH_ARCH_BIT_1)
 #define VM_HIGH_ARCH_2	BIT(VM_HIGH_ARCH_BIT_2)
 #define VM_HIGH_ARCH_3	BIT(VM_HIGH_ARCH_BIT_3)
 #define VM_HIGH_ARCH_4	BIT(VM_HIGH_ARCH_BIT_4)
+#define VM_HIGH_ARCH_5	BIT(VM_HIGH_ARCH_BIT_5)
 #endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
 
 #ifdef CONFIG_ARCH_HAS_PKEYS
@@ -339,6 +341,21 @@ extern unsigned int kobjsize(const void *objp);
 #endif
 #endif /* CONFIG_ARCH_HAS_PKEYS */
 
+#ifdef CONFIG_X86_USER_SHADOW_STACK
+/*
+ * VM_SHADOW_STACK should not be set with VM_SHARED because of lack of
+ * support core mm.
+ *
+ * These VMAs will get a single end guard page. This helps userspace protect
+ * itself from attacks. A single page is enough for current shadow stack archs
+ * (x86). See the comments near alloc_shstk() in arch/x86/kernel/shstk.c
+ * for more details on the guard size.
+ */
+# define VM_SHADOW_STACK	VM_HIGH_ARCH_5
+#else
+# define VM_SHADOW_STACK	VM_NONE
+#endif
+
 #if defined(CONFIG_X86)
 # define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
 #elif defined(CONFIG_PPC)
@@ -370,7 +387,7 @@
 #endif
 
 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
-# define VM_UFFD_MINOR_BIT	37
+# define VM_UFFD_MINOR_BIT	38
 # define VM_UFFD_MINOR		BIT(VM_UFFD_MINOR_BIT)	/* UFFD minor faults */
 #else /* !CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
 # define VM_UFFD_MINOR		VM_NONE
@@ -397,6 +414,8 @@
 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
 #endif
 
+#define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)
+
 #ifdef CONFIG_STACK_GROWSUP
 #define VM_STACK	VM_GROWSUP
 #define VM_STACK_EARLY	VM_GROWSDOWN
@@ -1309,7 +1328,7 @@ static inline unsigned long thp_size(struct page *page)
 static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 {
 	if (likely(vma->vm_flags & VM_WRITE))
-		pte = pte_mkwrite(pte);
+		pte = pte_mkwrite(pte, vma);
 
 	return pte;
 }
@@ -3265,7 +3284,8 @@ extern unsigned long mmap_region(struct file *file, unsigned long addr,
 	struct list_head *uf);
 extern unsigned long do_mmap(struct file *file, unsigned long addr,
 	unsigned long len, unsigned long prot, unsigned long flags,
-	unsigned long pgoff, unsigned long *populate, struct list_head *uf);
+	vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
+	struct list_head *uf);
 extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
 			 unsigned long start, size_t len, struct list_head *uf,
 			 bool unlock);
@@ -3353,15 +3373,26 @@ struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
 	return mtree_load(&mm->mm_mt, addr);
 }
 
+static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
+{
+	if (vma->vm_flags & VM_GROWSDOWN)
+		return stack_guard_gap;
+
+	/* See reasoning around the VM_SHADOW_STACK definition */
+	if (vma->vm_flags & VM_SHADOW_STACK)
+		return PAGE_SIZE;
+
+	return 0;
+}
+
 static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
 {
+	unsigned long gap = stack_guard_start_gap(vma);
 	unsigned long vm_start = vma->vm_start;
 
-	if (vma->vm_flags & VM_GROWSDOWN) {
-		vm_start -= stack_guard_gap;
-		if (vm_start > vma->vm_start)
-			vm_start = 0;
-	}
+	vm_start -= gap;
+	if (vm_start > vma->vm_start)
+		vm_start = 0;
 	return vm_start;
 }
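
To illustrate how the pieces above fit together, the following is a minimal standalone sketch (not kernel code) of the start-gap logic this patch introduces: a VM_GROWSDOWN stack keeps the larger stack_guard_gap below vm_start, while a shadow stack VMA gets a single guard page. The struct, flag values, gap size, and example addresses are simplified stand-ins for the kernel's vm_area_struct and tunables.

/* Hypothetical userspace sketch of the start-gap computation in this diff. */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define VM_GROWSDOWN	0x00000100UL		/* same bit value as the kernel flag */
#define VM_SHADOW_STACK	(1UL << 37)		/* VM_HIGH_ARCH_5 in the diff */

/* Stand-in for the kernel tunable; the default there is 256 pages. */
static unsigned long stack_guard_gap = 256UL * PAGE_SIZE;

struct vma {				/* simplified vm_area_struct */
	unsigned long vm_start;
	unsigned long vm_flags;
};

static unsigned long stack_guard_start_gap(const struct vma *vma)
{
	if (vma->vm_flags & VM_GROWSDOWN)
		return stack_guard_gap;
	/* Shadow stacks get a single guard page below vm_start. */
	if (vma->vm_flags & VM_SHADOW_STACK)
		return PAGE_SIZE;
	return 0;
}

static unsigned long vm_start_gap(const struct vma *vma)
{
	unsigned long gap = stack_guard_start_gap(vma);
	unsigned long vm_start = vma->vm_start;

	vm_start -= gap;
	if (vm_start > vma->vm_start)	/* clamp on underflow near address 0 */
		vm_start = 0;
	return vm_start;
}

int main(void)
{
	struct vma shstk = { .vm_start = 0x7f0000001000UL, .vm_flags = VM_SHADOW_STACK };
	struct vma stack = { .vm_start = 0x7ffffffde000UL, .vm_flags = VM_GROWSDOWN };
	struct vma plain = { .vm_start = 0x555555554000UL, .vm_flags = 0 };

	printf("shadow stack: gap starts at 0x%lx\n", vm_start_gap(&shstk));
	printf("grows-down stack: gap starts at 0x%lx\n", vm_start_gap(&stack));
	printf("ordinary mapping: no gap, 0x%lx\n", vm_start_gap(&plain));
	return 0;
}

Splitting the gap size out into stack_guard_start_gap() keeps vm_start_gap() a plain subtract-and-clamp, so any future VMA type that needs a start gap only has to extend the helper (and, per the diff, VM_STARTGAP_FLAGS groups the flags that imply such a gap).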