Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--	include/linux/mm.h	87
1 file changed, 37 insertions, 50 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0334ca97c584..cc292273e6ba 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -805,6 +805,24 @@ static inline void set_compound_order(struct page *page, unsigned int order)
 	page[1].compound_order = order;
 }
 
+/* Returns the number of pages in this potentially compound page. */
+static inline unsigned long compound_nr(struct page *page)
+{
+	return 1UL << compound_order(page);
+}
+
+/* Returns the number of bytes in this potentially compound page. */
+static inline unsigned long page_size(struct page *page)
+{
+	return PAGE_SIZE << compound_order(page);
+}
+
+/* Returns the number of bits needed for the number of bytes in a page */
+static inline unsigned int page_shift(struct page *page)
+{
+	return PAGE_SHIFT + compound_order(page);
+}
+
 void free_compound_page(struct page *page);
 
 #ifdef CONFIG_MMU
@@ -1057,8 +1075,9 @@ static inline void put_user_page(struct page *page)
 	put_page(page);
 }
 
-void put_user_pages_dirty(struct page **pages, unsigned long npages);
-void put_user_pages_dirty_lock(struct page **pages, unsigned long npages);
+void put_user_pages_dirty_lock(struct page **pages, unsigned long npages,
+			       bool make_dirty);
+
 void put_user_pages(struct page **pages, unsigned long npages);
 
 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
@@ -1405,7 +1424,11 @@ extern void pagefault_out_of_memory(void);
 
 extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);
 
+#ifdef CONFIG_MMU
 extern bool can_do_mlock(void);
+#else
+static inline bool can_do_mlock(void) { return false; }
+#endif
 extern int user_shm_lock(size_t, struct user_struct *);
 extern void user_shm_unlock(size_t, struct user_struct *);
 
@@ -1430,54 +1453,8 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long address,
 void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
 		unsigned long start, unsigned long end);
 
-/**
- * mm_walk - callbacks for walk_page_range
- * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
- *	       this handler should only handle pud_trans_huge() puds.
- *	       the pmd_entry or pte_entry callbacks will be used for
- *	       regular PUDs.
- * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
- *	       this handler is required to be able to handle
- *	       pmd_trans_huge() pmds.  They may simply choose to
- *	       split_huge_page() instead of handling it explicitly.
- * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
- * @pte_hole: if set, called for each hole at all levels
- * @hugetlb_entry: if set, called for each hugetlb entry
- * @test_walk: caller specific callback function to determine whether
- *             we walk over the current vma or not. Returning 0
- *             value means "do page table walk over the current vma,"
- *             and a negative one means "abort current page table walk
- *             right now." 1 means "skip the current vma."
- * @mm:        mm_struct representing the target process of page table walk
- * @vma:       vma currently walked (NULL if walking outside vmas)
- * @private:   private data for callbacks' usage
- *
- * (see the comment on walk_page_range() for more details)
- */
-struct mm_walk {
-	int (*pud_entry)(pud_t *pud, unsigned long addr,
-			 unsigned long next, struct mm_walk *walk);
-	int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
-			 unsigned long next, struct mm_walk *walk);
-	int (*pte_entry)(pte_t *pte, unsigned long addr,
-			 unsigned long next, struct mm_walk *walk);
-	int (*pte_hole)(unsigned long addr, unsigned long next,
-			struct mm_walk *walk);
-	int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
-			     unsigned long addr, unsigned long next,
-			     struct mm_walk *walk);
-	int (*test_walk)(unsigned long addr, unsigned long next,
-			struct mm_walk *walk);
-	struct mm_struct *mm;
-	struct vm_area_struct *vma;
-	void *private;
-};
-
 struct mmu_notifier_range;
 
-int walk_page_range(unsigned long addr, unsigned long end,
-		struct mm_walk *walk);
-int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
 void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
 		unsigned long end, unsigned long floor, unsigned long ceiling);
 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
@@ -1972,7 +1949,7 @@ static inline void pgtable_init(void)
 	pgtable_cache_init();
 }
 
-static inline bool pgtable_page_ctor(struct page *page)
+static inline bool pgtable_pte_page_ctor(struct page *page)
 {
 	if (!ptlock_init(page))
 		return false;
@@ -1981,7 +1958,7 @@ static inline bool pgtable_page_ctor(struct page *page)
 	return true;
 }
 
-static inline void pgtable_page_dtor(struct page *page)
+static inline void pgtable_pte_page_dtor(struct page *page)
 {
 	ptlock_free(page);
 	__ClearPageTable(page);
@@ -2351,6 +2328,8 @@ extern int install_special_mapping(struct mm_struct *mm,
 				   unsigned long addr, unsigned long len,
 				   unsigned long flags, struct page **pages);
 
+unsigned long randomize_stack_top(unsigned long stack_top);
+
 extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
 
 extern unsigned long mmap_region(struct file *file, unsigned long addr,
@@ -2614,6 +2593,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 #define FOLL_COW	0x4000	/* internal GUP flag */
 #define FOLL_ANON	0x8000	/* don't do file mappings */
 #define FOLL_LONGTERM	0x10000	/* mapping lifetime is indefinite: see below */
+#define FOLL_SPLIT_PMD	0x20000	/* split huge pmd before returning */
 
 /*
  * NOTE on FOLL_LONGTERM:
@@ -2891,5 +2871,12 @@ void __init setup_nr_node_ids(void);
 static inline void setup_nr_node_ids(void) {}
 #endif
 
+extern int memcmp_pages(struct page *page1, struct page *page2);
+
+static inline int pages_identical(struct page *page1, struct page *page2)
+{
+	return !memcmp_pages(page1, page2);
+}
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
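
The new compound_nr(), page_size() and page_shift() helpers simply scale the base page constants by compound_order(), so callers no longer have to open-code expressions such as PAGE_SIZE << compound_order(page). A minimal sketch of a caller, assuming a hypothetical report_page_geometry() function that is not part of this commit:

#include <linux/mm.h>
#include <linux/printk.h>

/*
 * Hypothetical example, not from this commit: report the geometry of a
 * possibly compound page using the new helpers.
 */
static void report_page_geometry(struct page *page)
{
	unsigned long nr = compound_nr(page);	/* number of base pages */
	unsigned long size = page_size(page);	/* size in bytes */
	unsigned int shift = page_shift(page);	/* log2 of that size */

	pr_debug("page spans %lu pages, %lu bytes (shift %u)\n",
		 nr, size, shift);
}

For an order-0 page this reports one page, PAGE_SIZE bytes and PAGE_SHIFT; for a compound page the values scale with its order.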
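
The get_user_pages cleanup folds the old put_user_pages_dirty() and two-argument put_user_pages_dirty_lock() into a single helper whose make_dirty argument selects whether the pages are marked dirty before the pins are released. A sketch of how a caller might migrate, using a hypothetical example_release_user_pages() wrapper that is not part of this commit:

#include <linux/mm.h>

/*
 * Hypothetical wrapper, not from this commit: release pinned user pages,
 * marking them dirty first only when the caller actually wrote to them.
 */
static void example_release_user_pages(struct page **pages,
				       unsigned long npages, bool dirtied)
{
	if (dirtied)
		/* mark the pages dirty, then drop the pins */
		put_user_pages_dirty_lock(pages, npages, true);
	else
		/* no writeback needed, just drop the pins */
		put_user_pages(pages, npages);
}

Because make_dirty is now an argument, callers that previously branched between the dirty and non-dirty variants can instead collapse to a single put_user_pages_dirty_lock(pages, npages, dirtied) call.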