Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--	include/linux/mm.h	70
1 file changed, 56 insertions(+), 14 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5df5feb49575..08ed53eeedd5 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -309,10 +309,34 @@ struct vm_fault {
 					 * VM_FAULT_DAX_LOCKED and fill in
 					 * entry here.
 					 */
-	/* for ->map_pages() only */
-	pgoff_t max_pgoff;		/* map pages for offset from pgoff till
-					 * max_pgoff inclusive */
-	pte_t *pte;			/* pte entry associated with ->pgoff */
+};
+
+/*
+ * Page fault context: passes though page fault handler instead of endless list
+ * of function arguments.
+ */
+struct fault_env {
+	struct vm_area_struct *vma;	/* Target VMA */
+	unsigned long address;		/* Faulting virtual address */
+	unsigned int flags;		/* FAULT_FLAG_xxx flags */
+	pmd_t *pmd;			/* Pointer to pmd entry matching
+					 * the 'address'
+					 */
+	pte_t *pte;			/* Pointer to pte entry matching
+					 * the 'address'. NULL if the page
+					 * table hasn't been allocated.
+					 */
+	spinlock_t *ptl;		/* Page table lock.
+					 * Protects pte page table if 'pte'
+					 * is not NULL, otherwise pmd.
+					 */
+	pgtable_t prealloc_pte;		/* Pre-allocated pte page table.
+					 * vm_ops->map_pages() calls
+					 * alloc_set_pte() from atomic context.
+					 * do_fault_around() pre-allocates
+					 * page table to avoid allocation from
+					 * atomic context.
+					 */
 };
 
 /*
@@ -327,7 +351,8 @@ struct vm_operations_struct {
 	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
 	int (*pmd_fault)(struct vm_area_struct *, unsigned long address,
 						pmd_t *, unsigned int flags);
-	void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);
+	void (*map_pages)(struct fault_env *fe,
+			pgoff_t start_pgoff, pgoff_t end_pgoff);
 
 	/* notification that a previously read-only page is about to become
 	 * writable, if an error is returned it will cause a SIGBUS */
@@ -537,7 +562,6 @@ void __put_page(struct page *page);
 void put_pages_list(struct list_head *pages);
 
 void split_page(struct page *page, unsigned int order);
-int split_free_page(struct page *page);
 
 /*
  * Compound pages have a destructor function.
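The hunks above fold the old per-fault arguments into the new struct fault_env and change ->map_pages() to take an explicit pgoff window instead of vm_fault's max_pgoff. Below is a minimal sketch of what an implementation of the new hook could look like for a filesystem that forwards to the generic page cache helpers; the example_ names are hypothetical and not part of this patch:

	/* Sketch only: a vm_operations_struct wired up against the new
	 * ->map_pages() signature. filemap_map_pages() now takes the same
	 * (fault_env, start_pgoff, end_pgoff) triple, so a trivial
	 * implementation can forward directly.
	 */
	static void example_map_pages(struct fault_env *fe,
				      pgoff_t start_pgoff, pgoff_t end_pgoff)
	{
		/* Map the already-cached pages in [start_pgoff, end_pgoff]. */
		filemap_map_pages(fe, start_pgoff, end_pgoff);
	}

	static const struct vm_operations_struct example_vm_ops = {
		.fault		= filemap_fault,
		.map_pages	= example_map_pages,
		.page_mkwrite	= filemap_page_mkwrite,
	};

In practice a filesystem like this would simply point .map_pages at filemap_map_pages; the wrapper is shown only to make the new signature visible.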
@@ -601,8 +625,8 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 	return pte;
 }
 
-void do_set_pte(struct vm_area_struct *vma, unsigned long address,
-		struct page *page, pte_t *pte, bool write, bool anon, bool old);
+int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg,
+		struct page *page);
 #endif
 
 /*
@@ -909,6 +933,11 @@ static inline struct zone *page_zone(const struct page *page)
 	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
 }
 
+static inline pg_data_t *page_pgdat(const struct page *page)
+{
+	return NODE_DATA(page_to_nid(page));
+}
+
 #ifdef SECTION_IN_PAGE_FLAGS
 static inline void set_page_section(struct page *page, unsigned long section)
 {
@@ -949,11 +978,21 @@ static inline struct mem_cgroup *page_memcg(struct page *page)
 {
 	return page->mem_cgroup;
 }
+static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
+{
+	WARN_ON_ONCE(!rcu_read_lock_held());
+	return READ_ONCE(page->mem_cgroup);
+}
 #else
 static inline struct mem_cgroup *page_memcg(struct page *page)
 {
 	return NULL;
 }
+static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
+{
+	WARN_ON_ONCE(!rcu_read_lock_held());
+	return NULL;
+}
 #endif
 
 /*
@@ -1035,6 +1074,7 @@ static inline pgoff_t page_file_index(struct page *page)
 }
 
 bool page_mapped(struct page *page);
+struct address_space *page_mapping(struct page *page);
 
 /*
  * Return true only if the page has been allocated with
@@ -1215,15 +1255,14 @@ int generic_error_remove_page(struct address_space *mapping, struct page *page);
 int invalidate_inode_page(struct page *page);
 
 #ifdef CONFIG_MMU
-extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-			unsigned long address, unsigned int flags);
+extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
+		unsigned int flags);
 extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
 			    unsigned long address, unsigned int fault_flags,
 			    bool *unlocked);
 #else
-static inline int handle_mm_fault(struct mm_struct *mm,
-			struct vm_area_struct *vma, unsigned long address,
-			unsigned int flags)
+static inline int handle_mm_fault(struct vm_area_struct *vma,
+		unsigned long address, unsigned int flags)
 {
 	/* should never happen if there's no MMU */
 	BUG();
@@ -2063,7 +2102,8 @@ extern void truncate_inode_pages_final(struct address_space *);
 
 /* generic vm_area_ops exported for stackable file systems */
 extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
-extern void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf);
+extern void filemap_map_pages(struct fault_env *fe,
+		pgoff_t start_pgoff, pgoff_t end_pgoff);
 extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
 
 /* mm/page-writeback.c */
@@ -2259,6 +2299,8 @@ static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
 }
 #endif	/* __HAVE_ARCH_GATE_AREA */
 
+extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
+
 #ifdef CONFIG_SYSCTL
 extern int sysctl_drop_caches;
 int drop_caches_sysctl_handler(struct ctl_table *, int,
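The remaining hunks drop the mm_struct argument from handle_mm_fault() (the mm is reachable as vma->vm_mm) and add the RCU-protected page_memcg_rcu() helper. A hedged sketch of a caller updated for the new handle_mm_fault() signature follows; the skeleton around the call is illustrative only, not taken from this patch:

	/* Sketch only: an arch-style fault path after the signature change.
	 * Real handlers also take mmap_sem, validate vma->vm_start, expand
	 * stacks, and retry on VM_FAULT_RETRY.
	 */
	static int example_fault_path(struct mm_struct *mm,
				      unsigned long address, unsigned int flags)
	{
		struct vm_area_struct *vma = find_vma(mm, address);

		if (!vma)
			return VM_FAULT_SIGSEGV;

		/* Before: handle_mm_fault(mm, vma, address, flags);
		 * After: the mm is taken from vma->vm_mm inside the call.
		 */
		return handle_mm_fault(vma, address, flags);
	}

page_memcg_rcu() pairs with rcu_read_lock()/rcu_read_unlock(): the WARN_ON_ONCE(!rcu_read_lock_held()) in the new helper makes the locking contract explicit, and READ_ONCE() guards against the memcg pointer changing under the reader.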