diff options
Diffstat (limited to 'include/linux/vmalloc.h')
| -rw-r--r-- | include/linux/vmalloc.h | 67 | 
1 files changed, 48 insertions, 19 deletions
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index df92211cf771..394d03cc0e92 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -26,6 +26,7 @@ struct notifier_block;		/* in notifier.h */  #define VM_KASAN		0x00000080      /* has allocated kasan shadow memory */  #define VM_FLUSH_RESET_PERMS	0x00000100	/* reset direct map and flush TLB on unmap, can't be freed in atomic context */  #define VM_MAP_PUT_PAGES	0x00000200	/* put pages and free array in vfree */ +#define VM_NO_HUGE_VMAP		0x00000400	/* force PAGE_SIZE pte mapping */  /*   * VM_KASAN is used slighly differently depending on CONFIG_KASAN_VMALLOC. @@ -54,6 +55,9 @@ struct vm_struct {  	unsigned long		size;  	unsigned long		flags;  	struct page		**pages; +#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC +	unsigned int		page_order; +#endif  	unsigned int		nr_pages;  	phys_addr_t		phys_addr;  	const void		*caller; @@ -78,6 +82,28 @@ struct vmap_area {  	};  }; +/* archs that select HAVE_ARCH_HUGE_VMAP should override one or more of these */ +#ifndef arch_vmap_p4d_supported +static inline bool arch_vmap_p4d_supported(pgprot_t prot) +{ +	return false; +} +#endif + +#ifndef arch_vmap_pud_supported +static inline bool arch_vmap_pud_supported(pgprot_t prot) +{ +	return false; +} +#endif + +#ifndef arch_vmap_pmd_supported +static inline bool arch_vmap_pmd_supported(pgprot_t prot) +{ +	return false; +} +#endif +  /*   *	Highlevel APIs for driver use   */ @@ -166,13 +192,27 @@ void free_vm_area(struct vm_struct *area);  extern struct vm_struct *remove_vm_area(const void *addr);  extern struct vm_struct *find_vm_area(const void *addr); +static inline bool is_vm_area_hugepages(const void *addr) +{ +	/* +	 * This may not 100% tell if the area is mapped with > PAGE_SIZE +	 * page table entries, if for some reason the architecture indicates +	 * larger sizes are available but decides not to use them, nothing +	 * prevents that. This only indicates the size of the physical page +	 * allocated in the vmalloc layer. +	 */ +#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC +	return find_vm_area(addr)->page_order > 0; +#else +	return false; +#endif +} +  #ifdef CONFIG_MMU -extern int map_kernel_range_noflush(unsigned long start, unsigned long size, -				    pgprot_t prot, struct page **pages); -int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot, -		struct page **pages); -extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size); -extern void unmap_kernel_range(unsigned long addr, unsigned long size); +int vmap_range(unsigned long addr, unsigned long end, +			phys_addr_t phys_addr, pgprot_t prot, +			unsigned int max_page_shift); +void vunmap_range(unsigned long addr, unsigned long end);  static inline void set_vm_flush_reset_perms(void *addr)  {  	struct vm_struct *vm = find_vm_area(addr); @@ -180,19 +220,8 @@ static inline void set_vm_flush_reset_perms(void *addr)  	if (vm)  		vm->flags |= VM_FLUSH_RESET_PERMS;  } +  #else -static inline int -map_kernel_range_noflush(unsigned long start, unsigned long size, -			pgprot_t prot, struct page **pages) -{ -	return size >> PAGE_SHIFT; -} -#define map_kernel_range map_kernel_range_noflush -static inline void -unmap_kernel_range_noflush(unsigned long addr, unsigned long size) -{ -} -#define unmap_kernel_range unmap_kernel_range_noflush  static inline void set_vm_flush_reset_perms(void *addr)  {  } @@ -241,7 +270,7 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)  int register_vmap_purge_notifier(struct notifier_block *nb);  int unregister_vmap_purge_notifier(struct notifier_block *nb); -#ifdef CONFIG_MMU +#if defined(CONFIG_MMU) && defined(CONFIG_PRINTK)  bool vmalloc_dump_obj(void *object);  #else  static inline bool vmalloc_dump_obj(void *object) { return false; }  |