Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--	include/linux/mm.h	| 169
1 file changed, 66 insertions(+), 103 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a7e4a9e7d807..aa47705191bc 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -424,51 +424,6 @@ extern unsigned int kobjsize(const void *objp);
  */
 extern pgprot_t protection_map[16];
 
-/**
- * enum fault_flag - Fault flag definitions.
- * @FAULT_FLAG_WRITE: Fault was a write fault.
- * @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE.
- * @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked.
- * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_lock and wait when retrying.
- * @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region.
- * @FAULT_FLAG_TRIED: The fault has been tried once.
- * @FAULT_FLAG_USER: The fault originated in userspace.
- * @FAULT_FLAG_REMOTE: The fault is not for current task/mm.
- * @FAULT_FLAG_INSTRUCTION: The fault was during an instruction fetch.
- * @FAULT_FLAG_INTERRUPTIBLE: The fault can be interrupted by non-fatal signals.
- *
- * About @FAULT_FLAG_ALLOW_RETRY and @FAULT_FLAG_TRIED: we can specify
- * whether we would allow page faults to retry by specifying these two
- * fault flags correctly.  Currently there can be three legal combinations:
- *
- * (a) ALLOW_RETRY and !TRIED:  this means the page fault allows retry, and
- *                              this is the first try
- *
- * (b) ALLOW_RETRY and TRIED:   this means the page fault allows retry, and
- *                              we've already tried at least once
- *
- * (c) !ALLOW_RETRY and !TRIED: this means the page fault does not allow retry
- *
- * The unlisted combination (!ALLOW_RETRY && TRIED) is illegal and should never
- * be used.  Note that page faults can be allowed to retry for multiple times,
- * in which case we'll have an initial fault with flags (a) then later on
- * continuous faults with flags (b).  We should always try to detect pending
- * signals before a retry to make sure the continuous page faults can still be
- * interrupted if necessary.
- */
-enum fault_flag {
-	FAULT_FLAG_WRITE =		1 << 0,
-	FAULT_FLAG_MKWRITE =		1 << 1,
-	FAULT_FLAG_ALLOW_RETRY =	1 << 2,
-	FAULT_FLAG_RETRY_NOWAIT =	1 << 3,
-	FAULT_FLAG_KILLABLE =		1 << 4,
-	FAULT_FLAG_TRIED =		1 << 5,
-	FAULT_FLAG_USER =		1 << 6,
-	FAULT_FLAG_REMOTE =		1 << 7,
-	FAULT_FLAG_INSTRUCTION =	1 << 8,
-	FAULT_FLAG_INTERRUPTIBLE =	1 << 9,
-};
-
 /*
  * The default fault flags that should be used by most of the
  * arch-specific page fault handlers.
@@ -577,6 +532,10 @@ enum page_entry_size {
  */
 struct vm_operations_struct {
 	void (*open)(struct vm_area_struct * area);
+	/**
+	 * @close: Called when the VMA is being removed from the MM.
+	 * Context: User context.  May sleep.  Caller holds mmap_lock.
+	 */
 	void (*close)(struct vm_area_struct * area);
 	/* Called any time before splitting to check if it's allowed */
 	int (*may_split)(struct vm_area_struct *area, unsigned long addr);
@@ -714,6 +673,27 @@ int vma_is_stack_for_current(struct vm_area_struct *vma);
 struct mmu_gather;
 struct inode;
 
+static inline unsigned int compound_order(struct page *page)
+{
+	if (!PageHead(page))
+		return 0;
+	return page[1].compound_order;
+}
+
+/**
+ * folio_order - The allocation order of a folio.
+ * @folio: The folio.
+ *
+ * A folio is composed of 2^order pages.  See get_order() for the definition
+ * of order.
+ *
+ * Return: The order of the folio.
+ */
+static inline unsigned int folio_order(struct folio *folio)
+{
+	return compound_order(&folio->page);
+}
+
 #include <linux/huge_mm.h>
 
 /*
@@ -840,19 +820,15 @@ static inline int page_mapcount(struct page *page)
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 int total_mapcount(struct page *page);
-int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
+int page_trans_huge_mapcount(struct page *page);
 #else
 static inline int total_mapcount(struct page *page)
 {
 	return page_mapcount(page);
 }
-static inline int page_trans_huge_mapcount(struct page *page,
-					   int *total_mapcount)
+static inline int page_trans_huge_mapcount(struct page *page)
 {
-	int mapcount = page_mapcount(page);
-	if (total_mapcount)
-		*total_mapcount = mapcount;
-	return mapcount;
+	return page_mapcount(page);
 }
 #endif
 
@@ -863,6 +839,13 @@ static inline struct page *virt_to_head_page(const void *x)
 	return compound_head(page);
 }
 
+static inline struct folio *virt_to_folio(const void *x)
+{
+	struct page *page = virt_to_page(x);
+
+	return page_folio(page);
+}
+
 void __put_page(struct page *page);
 void put_pages_list(struct list_head *pages);
 
@@ -906,27 +889,6 @@ static inline void destroy_compound_page(struct page *page)
 	compound_page_dtors[page[1].compound_dtor](page);
 }
 
-static inline unsigned int compound_order(struct page *page)
-{
-	if (!PageHead(page))
-		return 0;
-	return page[1].compound_order;
-}
-
-/**
- * folio_order - The allocation order of a folio.
- * @folio: The folio.
- *
- * A folio is composed of 2^order pages.  See get_order() for the definition
- * of order.
- *
- * Return: The order of the folio.
- */
-static inline unsigned int folio_order(struct folio *folio)
-{
-	return compound_order(&folio->page);
-}
-
 static inline bool hpage_pincount_available(struct page *page)
 {
 	/*
@@ -1753,6 +1715,11 @@ void page_address_init(void);
 #define page_address_init()  do { } while(0)
 #endif
 
+static inline void *folio_address(const struct folio *folio)
+{
+	return page_address(&folio->page);
+}
+
 extern void *page_rmapping(struct page *page);
 extern struct anon_vma *page_anon_vma(struct page *page);
 extern pgoff_t __page_file_index(struct page *page);
@@ -1825,28 +1792,6 @@ static inline bool can_do_mlock(void) { return false; }
 extern int user_shm_lock(size_t, struct ucounts *);
 extern void user_shm_unlock(size_t, struct ucounts *);
 
-/*
- * Parameter block passed down to zap_pte_range in exceptional cases.
- */
-struct zap_details {
-	struct address_space *zap_mapping;	/* Check page->mapping if set */
-	struct page *single_page;		/* Locked page to be unmapped */
-};
-
-/*
- * We set details->zap_mappings when we want to unmap shared but keep private
- * pages. Return true if skip zapping this page, false otherwise.
- */
-static inline bool
-zap_skip_check_mapping(struct zap_details *details, struct page *page)
-{
-	if (!details || !page)
-		return false;
-
-	return details->zap_mapping &&
-	    (details->zap_mapping != page_rmapping(page));
-}
-
 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 			     pte_t pte);
 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
@@ -1881,7 +1826,6 @@ extern void truncate_pagecache(struct inode *inode, loff_t new);
 extern void truncate_setsize(struct inode *inode, loff_t newsize);
 void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
 void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
-int truncate_inode_page(struct address_space *mapping, struct page *page);
 int generic_error_remove_page(struct address_space *mapping, struct page *page);
 int invalidate_inode_page(struct page *page);
 
@@ -1892,7 +1836,6 @@ extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
 extern int fixup_user_fault(struct mm_struct *mm,
 			    unsigned long address, unsigned int fault_flags,
 			    bool *unlocked);
-void unmap_mapping_page(struct page *page);
 void unmap_mapping_pages(struct address_space *mapping,
 		pgoff_t start, pgoff_t nr, bool even_cows);
 void unmap_mapping_range(struct address_space *mapping,
@@ -1913,7 +1856,6 @@ static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
 	BUG();
 	return -EFAULT;
 }
-static inline void unmap_mapping_page(struct page *page) { }
 static inline void unmap_mapping_pages(struct address_space *mapping,
 		pgoff_t start, pgoff_t nr, bool even_cows) { }
 static inline void unmap_mapping_range(struct address_space *mapping,
@@ -1970,7 +1912,6 @@ int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
 			struct page **pages);
 struct page *get_dump_page(unsigned long addr);
 
-extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
 extern void do_invalidatepage(struct page *page, unsigned int offset,
 			      unsigned int length);
 
@@ -2658,7 +2599,7 @@ static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
 extern struct vm_area_struct *vma_merge(struct mm_struct *,
 	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
 	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
-	struct mempolicy *, struct vm_userfaultfd_ctx);
+	struct mempolicy *, struct vm_userfaultfd_ctx, const char *);
 extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
 extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
 	unsigned long addr, int new_below);
@@ -3167,7 +3108,6 @@ int drop_caches_sysctl_handler(struct ctl_table *, int, void *, size_t *,
 #endif
 
 void drop_slab(void);
-void drop_slab_node(int nid);
 
 #ifndef CONFIG_MMU
 #define randomize_va_space 0
@@ -3220,6 +3160,7 @@ enum mf_flags {
 	MF_ACTION_REQUIRED = 1 << 1,
 	MF_MUST_KILL = 1 << 2,
 	MF_SOFT_OFFLINE = 1 << 3,
+	MF_UNPOISON = 1 << 4,
 };
 extern int memory_failure(unsigned long pfn, int flags);
 extern void memory_failure_queue(unsigned long pfn, int flags);
@@ -3231,6 +3172,19 @@ extern void shake_page(struct page *p);
 extern atomic_long_t num_poisoned_pages __read_mostly;
 extern int soft_offline_page(unsigned long pfn, int flags);
 
+#ifndef arch_memory_failure
+static inline int arch_memory_failure(unsigned long pfn, int flags)
+{
+	return -ENXIO;
+}
+#endif
+
+#ifndef arch_is_platform_page
+static inline bool arch_is_platform_page(u64 paddr)
+{
+	return false;
+}
+#endif
 
 /*
  * Error handlers for various types of pages.
@@ -3247,7 +3201,6 @@ enum mf_action_page_type {
 	MF_MSG_KERNEL_HIGH_ORDER,
 	MF_MSG_SLAB,
 	MF_MSG_DIFFERENT_COMPOUND,
-	MF_MSG_POISONED_HUGE,
 	MF_MSG_HUGE,
 	MF_MSG_FREE_HUGE,
 	MF_MSG_NON_PMD_HUGE,
@@ -3262,7 +3215,6 @@ enum mf_action_page_type {
 	MF_MSG_CLEAN_LRU,
 	MF_MSG_TRUNCATED_LRU,
 	MF_MSG_BUDDY,
-	MF_MSG_BUDDY_2ND,
 	MF_MSG_DAX,
 	MF_MSG_UNSPLIT_THP,
 	MF_MSG_UNKNOWN,
@@ -3391,5 +3343,16 @@ static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
 	return 0;
 }
 
+#ifdef CONFIG_ANON_VMA_NAME
+int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
+			  unsigned long len_in, const char *name);
+#else
+static inline int
+madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
+		      unsigned long len_in, const char *name) {
+	return 0;
+}
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
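Note on the relocated compound_order()/folio_order() helpers: the diff moves them earlier in the header (deleting the old definitions further down) so that code above the original site can use them. The contract is that a folio of order n spans 2^n pages. A minimal userspace model of that arithmetic, where PAGE_SIZE is a stand-in constant rather than the kernel macro:

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* stand-in; the real value is arch-dependent */

/* A folio of order n covers 2^n pages, mirroring folio_order()'s contract. */
static unsigned long order_to_pages(unsigned int order)
{
	return 1UL << order;
}

int main(void)
{
	for (unsigned int order = 0; order <= 9; order++)
		printf("order %u: %lu pages, %lu bytes\n", order,
		       order_to_pages(order),
		       order_to_pages(order) * PAGE_SIZE);
	return 0;
}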
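Note on the arch_memory_failure()/arch_is_platform_page() fallbacks: they use the common kernel idiom of a generic stub guarded by #ifndef, which an architecture replaces by defining a same-named macro before the generic definition is seen. A self-contained sketch of the idiom in plain C; the override body is invented for illustration:

#include <stdbool.h>
#include <stdio.h>

/* "Arch" override: defining the macro suppresses the generic stub below. */
#define arch_is_platform_page arch_is_platform_page
static bool arch_is_platform_page(unsigned long long paddr)
{
	return paddr >= 0x100000000ULL;	/* made-up platform-owned range */
}

/* Generic stub, compiled only when no override was defined. */
#ifndef arch_is_platform_page
static bool arch_is_platform_page(unsigned long long paddr)
{
	return false;
}
#endif

int main(void)
{
	printf("%d %d\n", arch_is_platform_page(0x1000ULL),
	       arch_is_platform_page(0x100000000ULL));
	return 0;
}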
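Note on the new const char * argument to vma_merge() and the madvise_set_anon_name() declarations: both belong to the anonymous-VMA naming feature (CONFIG_ANON_VMA_NAME). On kernels that carry the feature, userspace reaches it via prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ...). A hedged usage sketch; the prctl simply fails with EINVAL when the feature is not built in:

#include <stdio.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#ifndef PR_SET_VMA
#define PR_SET_VMA		0x53564d41	/* values from the uapi prctl header */
#define PR_SET_VMA_ANON_NAME	0
#endif

int main(void)
{
	size_t len = 2 * 1024 * 1024;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	/* On success the region shows up as [anon:my buffer] in /proc/self/maps. */
	if (prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
		  (unsigned long)p, len, "my buffer"))
		perror("prctl(PR_SET_VMA)");
	return 0;
}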