From 842ca547f706b1e05ccf3026a0ab15d24772a188 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Thu, 29 Apr 2021 22:55:35 -0700
Subject: mm: move page_mapping_file to pagemap.h

page_mapping_file() is only used by some architectures, and then it is
usually only used in one place.  Make it a static inline function so
other architectures don't have to carry this dead code.

Link: https://lkml.kernel.org/r/20210317123011.350118-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle)
Reviewed-by: David Hildenbrand
Acked-by: Mike Rapoport
Cc: Huang Ying
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/mm.h | 1 -
 1 file changed, 1 deletion(-)

(limited to 'include/linux/mm.h')

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 21115933b9b8..2e5c207e702c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1629,7 +1629,6 @@ static inline pgoff_t page_index(struct page *page)
 
 bool page_mapped(struct page *page);
 struct address_space *page_mapping(struct page *page);
-struct address_space *page_mapping_file(struct page *page);
 
 /*
  * Return true only if the page has been allocated with
-- cgit
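For reference, the definition becomes a static inline in include/linux/pagemap.h. A sketch of the moved helper (the body is unchanged from the old out-of-line mm/util.c version; only the linkage changes):

/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
static inline struct address_space *page_mapping_file(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return NULL;
	return page_mapping(page);
}

Architectures whose cache-flushing code calls it keep doing so via pagemap.h; everyone else no longer carries the dead code.
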
From 458a4f788f8602e5701b3d8c2fb6b021310a7301 Mon Sep 17 00:00:00 2001
From: Joao Martins
Date: Thu, 29 Apr 2021 22:55:50 -0700
Subject: mm/gup: add a range variant of unpin_user_pages_dirty_lock()

Add an unpin_user_page_range_dirty_lock() API which takes a starting
page and how many consecutive pages we want to unpin and optionally
dirty.  To that end, define another iterator, for_each_compound_range(),
that operates on page ranges as opposed to a page array.

For users (like RDMA mr_dereg) where each sg represents a contiguous
set of pages, we're able to unpin pages more efficiently without having
to supply an array of pages, which is much of what happens today with
unpin_user_pages().

Link: https://lkml.kernel.org/r/20210212130843.13865-4-joao.m.martins@oracle.com
Suggested-by: Jason Gunthorpe
Signed-off-by: Joao Martins
Reviewed-by: Jason Gunthorpe
Reviewed-by: John Hubbard
Cc: Christoph Hellwig
Cc: Doug Ledford
Cc: Matthew Wilcox
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/mm.h |  2 ++
 mm/gup.c           | 62 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 64 insertions(+)

(limited to 'include/linux/mm.h')

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2e5c207e702c..702c2a7379d6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1265,6 +1265,8 @@ static inline void put_page(struct page *page)
 void unpin_user_page(struct page *page);
 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
				 bool make_dirty);
+void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
+				      bool make_dirty);
 void unpin_user_pages(struct page **pages, unsigned long npages);
 
 /**
diff --git a/mm/gup.c b/mm/gup.c
index de1b75ef44da..66522ae28d09 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -213,6 +213,32 @@ void unpin_user_page(struct page *page)
 }
 EXPORT_SYMBOL(unpin_user_page);
 
+static inline void compound_range_next(unsigned long i, unsigned long npages,
+				       struct page **list, struct page **head,
+				       unsigned int *ntails)
+{
+	struct page *next, *page;
+	unsigned int nr = 1;
+
+	if (i >= npages)
+		return;
+
+	next = *list + i;
+	page = compound_head(next);
+	if (PageCompound(page) && compound_order(page) >= 1)
+		nr = min_t(unsigned int,
+			   page + compound_nr(page) - next, npages - i);
+
+	*head = page;
+	*ntails = nr;
+}
+
+#define for_each_compound_range(__i, __list, __npages, __head, __ntails) \
+	for (__i = 0, \
+	     compound_range_next(__i, __npages, __list, &(__head), &(__ntails)); \
+	     __i < __npages; __i += __ntails, \
+	     compound_range_next(__i, __npages, __list, &(__head), &(__ntails)))
+
 static inline void compound_next(unsigned long i, unsigned long npages,
				 struct page **list, struct page **head,
				 unsigned int *ntails)
@@ -301,6 +327,42 @@ void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
 }
 EXPORT_SYMBOL(unpin_user_pages_dirty_lock);
 
+/**
+ * unpin_user_page_range_dirty_lock() - release and optionally dirty
+ * gup-pinned page range
+ *
+ * @page:  the starting page of a range maybe marked dirty, and definitely released.
+ * @npages: number of consecutive pages to release.
+ * @make_dirty: whether to mark the pages dirty
+ *
+ * "gup-pinned page range" refers to a range of pages that has had one of the
+ * pin_user_pages() variants called on that page.
+ *
+ * For the page ranges defined by [page .. page+npages], make that range (or
+ * its head pages, if a compound page) dirty, if @make_dirty is true, and if the
+ * page range was previously listed as clean.
+ *
+ * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
+ * required, then the caller should a) verify that this is really correct,
+ * because _lock() is usually required, and b) hand code it:
+ * set_page_dirty_lock(), unpin_user_page().
+ *
+ */
+void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
+				      bool make_dirty)
+{
+	unsigned long index;
+	struct page *head;
+	unsigned int ntails;
+
+	for_each_compound_range(index, &page, npages, head, ntails) {
+		if (make_dirty && !PageDirty(head))
+			set_page_dirty_lock(head);
+		put_compound_head(head, ntails, FOLL_PIN);
+	}
+}
+EXPORT_SYMBOL(unpin_user_page_range_dirty_lock);
+
 /**
  * unpin_user_pages() - release an array of gup-pinned pages.
  * @pages: array of pages to be marked dirty and released.
-- cgit
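To see why the range variant helps the RDMA case named in the message, consider a hypothetical memory-region teardown where each scatterlist entry covers a physically contiguous run of pinned pages. The function and its caller are made up for illustration; only unpin_user_page_range_dirty_lock() and the scatterlist helpers are real:

#include <linux/mm.h>
#include <linux/scatterlist.h>

static void example_unpin_region(struct scatterlist *sgl, int nents,
				 bool make_dirty)
{
	struct scatterlist *sg;
	int i;

	/* One call releases a whole contiguous run of pinned pages;
	 * no need to materialize a struct page *array first. */
	for_each_sg(sgl, sg, nents, i)
		unpin_user_page_range_dirty_lock(sg_page(sg),
				DIV_ROUND_UP(sg->length, PAGE_SIZE),
				make_dirty);
}
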
From 4066c119483af8e86a75447fd35be1d2553d370f Mon Sep 17 00:00:00 2001
From: Yang Shi
Date: Thu, 29 Apr 2021 22:55:56 -0700
Subject: mm: gup: remove FOLL_SPLIT

Since commit 5a52c9df62b4 ("uprobe: use FOLL_SPLIT_PMD instead of
FOLL_SPLIT") and commit ba925fa35057 ("s390/gmap: improve THP
splitting"), FOLL_SPLIT has not been used anymore.  Remove the dead
code.

Link: https://lkml.kernel.org/r/20210330203900.9222-1-shy828301@gmail.com
Signed-off-by: Yang Shi
Reviewed-by: John Hubbard
Reviewed-by: Jason Gunthorpe
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 Documentation/vm/transhuge.rst |  5 -----
 include/linux/mm.h             |  1 -
 mm/gup.c                       | 28 ++--------------------------
 3 files changed, 2 insertions(+), 32 deletions(-)

(limited to 'include/linux/mm.h')

diff --git a/Documentation/vm/transhuge.rst b/Documentation/vm/transhuge.rst
index 0ed23e59abe5..216db1d67d04 100644
--- a/Documentation/vm/transhuge.rst
+++ b/Documentation/vm/transhuge.rst
@@ -53,11 +53,6 @@ prevent the page from being split by anyone.
 of handling GUP on hugetlbfs will also work fine on transparent
 hugepage backed mappings.
 
-In case you can't handle compound pages if they're returned by
-follow_page, the FOLL_SPLIT bit can be specified as a parameter to
-follow_page, so that it will split the hugepages before returning
-them.
-
 Graceful fallback
 =================
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 702c2a7379d6..64be3baf861a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2791,7 +2791,6 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 #define FOLL_NOWAIT	0x20	/* if a disk transfer is needed, start the IO
				 * and return without waiting upon it */
 #define FOLL_POPULATE	0x40	/* fault in page */
-#define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
 #define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
 #define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
 #define FOLL_MIGRATION	0x400	/* wait for page to replace migration entry */
diff --git a/mm/gup.c b/mm/gup.c
index 66522ae28d09..71e546e279fc 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -516,18 +516,6 @@ retry:
		}
	}
 
-	if (flags & FOLL_SPLIT && PageTransCompound(page)) {
-		get_page(page);
-		pte_unmap_unlock(ptep, ptl);
-		lock_page(page);
-		ret = split_huge_page(page);
-		unlock_page(page);
-		put_page(page);
-		if (ret)
-			return ERR_PTR(ret);
-		goto retry;
-	}
-
	/* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
	if (unlikely(!try_grab_page(page, flags))) {
		page = ERR_PTR(-ENOMEM);
@@ -672,7 +660,7 @@ retry_locked:
		spin_unlock(ptl);
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
-	if (flags & (FOLL_SPLIT | FOLL_SPLIT_PMD)) {
+	if (flags & FOLL_SPLIT_PMD) {
		int ret;
		page = pmd_page(*pmd);
		if (is_huge_zero_page(page)) {
@@ -681,19 +669,7 @@ retry_locked:
			split_huge_pmd(vma, pmd, address);
			if (pmd_trans_unstable(pmd))
				ret = -EBUSY;
-		} else if (flags & FOLL_SPLIT) {
-			if (unlikely(!try_get_page(page))) {
-				spin_unlock(ptl);
-				return ERR_PTR(-ENOMEM);
-			}
-			spin_unlock(ptl);
-			lock_page(page);
-			ret = split_huge_page(page);
-			unlock_page(page);
-			put_page(page);
-			if (pmd_none(*pmd))
-				return no_page_table(vma, flags);
-		} else {	/* flags & FOLL_SPLIT_PMD */
+		} else {
			spin_unlock(ptl);
			split_huge_pmd(vma, pmd, address);
			ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;
-- cgit
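The surviving knob is FOLL_SPLIT_PMD, which splits only the PMD-level mapping and leaves the compound page itself intact. A hedged sketch of the replacement pattern, loosely modeled on the uprobe conversion cited above (the surrounding variables are assumed; error handling trimmed, and the v5.12-era get_user_pages_remote() signature is assumed):

	struct page *page;
	int ret;

	/* FOLL_SPLIT_PMD: PTE-map the huge page rather than splitting
	 * the page itself, which is what FOLL_SPLIT used to do. */
	ret = get_user_pages_remote(mm, vaddr, 1,
				    FOLL_FORCE | FOLL_SPLIT_PMD,
				    &page, NULL, NULL);
	if (ret <= 0)
		return ret ? ret : -EBUSY;
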
From 74ffa5a3e68504dd289135b1cf0422c19ffb3f2e Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Thu, 29 Apr 2021 22:57:29 -0700
Subject: mm: add remap_pfn_range_notrack

Patch series "add remap_pfn_range_notrack instead of reinventing it in
i915", v2.

i915 has some reason to want to avoid the track_pfn_remap overhead in
remap_pfn_range.  Add a function to the core VM to do just that rather
than reinventing the functionality poorly in the driver.

Note that the remap_io_sg path does get exercised when using Xorg on my
Thinkpad X1, so this should be considered lightly tested; I've not
managed to hit the remap_io_mapping path at all.

This patch (of 4):

Add a version of remap_pfn_range that does not call track_pfn_remap.
This will be used to fix horrible abuses of VM internals in the i915
driver.

Link: https://lkml.kernel.org/r/20210326055505.1424432-1-hch@lst.de
Link: https://lkml.kernel.org/r/20210326055505.1424432-2-hch@lst.de
Signed-off-by: Christoph Hellwig
Acked-by: Daniel Vetter
Cc: Jani Nikula
Cc: Joonas Lahtinen
Cc: Rodrigo Vivi
Cc: Chris Wilson
Cc: Peter Zijlstra
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/mm.h |  2 ++
 mm/memory.c        | 51 +++++++++++++++++++++++++++++++--------------------
 2 files changed, 33 insertions(+), 20 deletions(-)

(limited to 'include/linux/mm.h')

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 64be3baf861a..a8335cecf706 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2732,6 +2732,8 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
			unsigned long pfn, unsigned long size, pgprot_t);
+int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
+		unsigned long pfn, unsigned long size, pgprot_t prot);
 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
			struct page **pages, unsigned long *num);
diff --git a/mm/memory.c b/mm/memory.c
index f95d12032b34..8dc69471be6c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2260,26 +2260,17 @@ static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
	return 0;
 }
 
-/**
- * remap_pfn_range - remap kernel memory to userspace
- * @vma: user vma to map to
- * @addr: target page aligned user address to start at
- * @pfn: page frame number of kernel physical memory address
- * @size: size of mapping area
- * @prot: page protection flags for this mapping
- *
- * Note: this is only safe if the mm semaphore is held when called.
- *
- * Return: %0 on success, negative error code otherwise.
+/*
+ * Variant of remap_pfn_range that does not call track_pfn_remap.  The caller
+ * must have pre-validated the caching bits of the pgprot_t.
  */
-int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
-		    unsigned long pfn, unsigned long size, pgprot_t prot)
+int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
+		unsigned long pfn, unsigned long size, pgprot_t prot)
 {
	pgd_t *pgd;
	unsigned long next;
	unsigned long end = addr + PAGE_ALIGN(size);
	struct mm_struct *mm = vma->vm_mm;
-	unsigned long remap_pfn = pfn;
	int err;
 
	if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
@@ -2309,10 +2300,6 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		vma->vm_pgoff = pfn;
	}
 
-	err = track_pfn_remap(vma, &prot, remap_pfn, addr, PAGE_ALIGN(size));
-	if (err)
-		return -EINVAL;
-
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
 
	BUG_ON(addr >= end);
@@ -2324,12 +2311,36 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		err = remap_p4d_range(mm, pgd, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot);
		if (err)
-			break;
+			return err;
	} while (pgd++, addr = next, addr != end);
 
+	return 0;
+}
+
+/**
+ * remap_pfn_range - remap kernel memory to userspace
+ * @vma: user vma to map to
+ * @addr: target page aligned user address to start at
+ * @pfn: page frame number of kernel physical memory address
+ * @size: size of mapping area
+ * @prot: page protection flags for this mapping
+ *
+ * Note: this is only safe if the mm semaphore is held when called.
+ *
+ * Return: %0 on success, negative error code otherwise.
+ */
+int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
+		    unsigned long pfn, unsigned long size, pgprot_t prot)
+{
+	int err;
+
+	err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
	if (err)
-		untrack_pfn(vma, remap_pfn, PAGE_ALIGN(size));
+		return -EINVAL;
+	err = remap_pfn_range_notrack(vma, addr, pfn, size, prot);
+	if (err)
+		untrack_pfn(vma, pfn, PAGE_ALIGN(size));
	return err;
 }
 EXPORT_SYMBOL(remap_pfn_range);
-- cgit
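A sketch of the intended usage: a hypothetical driver mmap handler that has already chosen the caching mode itself (write-combining here) and therefore skips the track_pfn_remap() step that plain remap_pfn_range() performs. Note that this patch does not export remap_pfn_range_notrack(), so as written this is only reachable from built-in code:

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Caching bits pre-validated by the caller, as the new
	 * comment in mm/memory.c requires. */
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return remap_pfn_range_notrack(vma, vma->vm_start, vma->vm_pgoff,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot);
}
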
Tsirkin" Cc: Mike Rapoport Cc: Minchan Kim Cc: Peter Xu Cc: Sonny Rao Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 2 +- fs/aio.c | 5 +---- include/linux/mm.h | 2 +- mm/mmap.c | 6 +----- mm/mremap.c | 2 +- 5 files changed, 5 insertions(+), 12 deletions(-) (limited to 'include/linux/mm.h') diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c index 935af2ac6b1a..05a89e33fde2 100644 --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c @@ -1458,7 +1458,7 @@ static int pseudo_lock_dev_release(struct inode *inode, struct file *filp) return 0; } -static int pseudo_lock_dev_mremap(struct vm_area_struct *area, unsigned long flags) +static int pseudo_lock_dev_mremap(struct vm_area_struct *area) { /* Not supported */ return -EINVAL; diff --git a/fs/aio.c b/fs/aio.c index 1f32da13d39e..76ce0cc3ee4e 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -323,16 +323,13 @@ static void aio_free_ring(struct kioctx *ctx) } } -static int aio_ring_mremap(struct vm_area_struct *vma, unsigned long flags) +static int aio_ring_mremap(struct vm_area_struct *vma) { struct file *file = vma->vm_file; struct mm_struct *mm = vma->vm_mm; struct kioctx_table *table; int i, res = -EINVAL; - if (flags & MREMAP_DONTUNMAP) - return -EINVAL; - spin_lock(&mm->ioctx_lock); rcu_read_lock(); table = rcu_dereference(mm->ioctx_table); diff --git a/include/linux/mm.h b/include/linux/mm.h index a8335cecf706..93097dbd9604 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -580,7 +580,7 @@ struct vm_operations_struct { void (*close)(struct vm_area_struct * area); /* Called any time before splitting to check if it's allowed */ int (*may_split)(struct vm_area_struct *area, unsigned long addr); - int (*mremap)(struct vm_area_struct *area, unsigned long flags); + int (*mremap)(struct vm_area_struct *area); /* * Called by mprotect() to make driver-specific permission * checks before mprotect() is finalised. The VMA must not diff --git a/mm/mmap.c b/mm/mmap.c index 1d96a21acb2f..347ef9b83bb5 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -3409,14 +3409,10 @@ static const char *special_mapping_name(struct vm_area_struct *vma) return ((struct vm_special_mapping *)vma->vm_private_data)->name; } -static int special_mapping_mremap(struct vm_area_struct *new_vma, - unsigned long flags) +static int special_mapping_mremap(struct vm_area_struct *new_vma) { struct vm_special_mapping *sm = new_vma->vm_private_data; - if (flags & MREMAP_DONTUNMAP) - return -EINVAL; - if (WARN_ON_ONCE(current->mm != new_vma->vm_mm)) return -EFAULT; diff --git a/mm/mremap.c b/mm/mremap.c index db5b8b28c2dd..d22629ff8f3c 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -545,7 +545,7 @@ static unsigned long move_vma(struct vm_area_struct *vma, if (moved_len < old_len) { err = -ENOMEM; } else if (vma->vm_ops && vma->vm_ops->mremap) { - err = vma->vm_ops->mremap(new_vma, flags); + err = vma->vm_ops->mremap(new_vma); } if (unlikely(err)) { -- cgit From 78f4841e34763079be0661744c1ca997be64eb56 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Thu, 29 Apr 2021 22:59:25 -0700 Subject: mm/doc: fix fault_flag_allow_retry_first kerneldoc make htmldocs reports: include/linux/mm.h:496: warning: Function parameter or member 'flags' not described in 'fault_flag_allow_retry_first' Add a description. 
From 78f4841e34763079be0661744c1ca997be64eb56 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Thu, 29 Apr 2021 22:59:25 -0700
Subject: mm/doc: fix fault_flag_allow_retry_first kerneldoc

make htmldocs reports:

include/linux/mm.h:496: warning: Function parameter or member 'flags' not described in 'fault_flag_allow_retry_first'

Add a description.

Link: https://lkml.kernel.org/r/20210322195022.2143603-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle)
Cc: John Hubbard
Cc: Mike Rapoport
Cc: Jonathan Corbet
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/mm.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include/linux/mm.h')

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 93097dbd9604..382c33e7d906 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -485,6 +485,7 @@ extern pgprot_t protection_map[16];
 
 /**
  * fault_flag_allow_retry_first - check ALLOW_RETRY the first time
+ * @flags: Fault flags.
  *
  * This is mostly used for places where we want to try to avoid taking
  * the mmap_lock for too long a time when waiting for another condition
-- cgit

From 136dfc9949f84089217f84e6478471dabbf14ba7 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Thu, 29 Apr 2021 22:59:28 -0700
Subject: mm/doc: fix page_maybe_dma_pinned kerneldoc

make htmldocs reports:

include/linux/mm.h:1341: warning: Excess function parameter 'Return' description in 'page_maybe_dma_pinned'

Fix a few other formatting nits while I'm editing this description.

Link: https://lkml.kernel.org/r/20210322195022.2143603-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle)
Acked-by: Mike Rapoport
Reviewed-by: John Hubbard
Cc: Jonathan Corbet
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/mm.h | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'include/linux/mm.h')

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 382c33e7d906..5acda8761935 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1271,10 +1271,11 @@ void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
 void unpin_user_pages(struct page **pages, unsigned long npages);
 
 /**
- * page_maybe_dma_pinned() - report if a page is pinned for DMA.
+ * page_maybe_dma_pinned - Report if a page is pinned for DMA.
+ * @page: The page.
  *
  * This function checks if a page has been pinned via a call to
- * pin_user_pages*().
+ * a function in the pin_user_pages() family.
  *
  * For non-huge pages, the return value is partially fuzzy: false is not fuzzy,
  * because it means "definitely not pinned for DMA", but true means "probably
@@ -1292,9 +1293,8 @@ void unpin_user_pages(struct page **pages, unsigned long npages);
  *
  * For more information, please see Documentation/core-api/pin_user_pages.rst.
  *
- * @page: pointer to page to be queried.
- * @Return: True, if it is likely that the page has been "dma-pinned".
- *          False, if the page is definitely not dma-pinned.
+ * Return: True, if it is likely that the page has been "dma-pinned".
+ *         False, if the page is definitely not dma-pinned.
  */
 static inline bool page_maybe_dma_pinned(struct page *page)
 {
-- cgit
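Both fixes follow the same kernel-doc rules: every parameter gets an @name: line, and return values go in a Return: section rather than a fake @Return parameter. A made-up helper showing the shape that 'make htmldocs' accepts without warnings:

/**
 * example_helper - One-line summary on the first line.
 * @flags: Every parameter needs an @name: description like this one.
 *
 * Longer description goes here, after a blank comment line.
 *
 * Return: Document the result in a Return: section, not as a
 *         parameter named 'Return'.
 */
static inline bool example_helper(unsigned int flags)
{
	return flags != 0;
}
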
From da2f5eb3d344503c4d851bdf1ae2379167074413 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Thu, 29 Apr 2021 22:59:31 -0700
Subject: mm/doc: turn fault flags into an enum

The kernel-doc script complains about:

include/linux/mm.h:425: warning: wrong kernel-doc identifier on line: * Fault flag definitions.

I don't know how to document a series of #defines, so turn these
definitions into an enum and document that instead.

Link: https://lkml.kernel.org/r/20210322195022.2143603-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle)
Acked-by: Mike Rapoport
Cc: John Hubbard
Cc: Jonathan Corbet
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/mm.h | 29 +++++++++++++++--------------
 1 file changed, 15 insertions(+), 14 deletions(-)

(limited to 'include/linux/mm.h')

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5acda8761935..64b2e3a0b94d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -432,8 +432,7 @@ extern unsigned int kobjsize(const void *objp);
 extern pgprot_t protection_map[16];
 
 /**
- * Fault flag definitions.
- *
+ * enum fault_flag - Fault flag definitions.
  * @FAULT_FLAG_WRITE: Fault was a write fault.
  * @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE.
  * @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked.
@@ -464,16 +463,18 @@ extern pgprot_t protection_map[16];
  * signals before a retry to make sure the continuous page faults can still be
  * interrupted if necessary.
  */
-#define FAULT_FLAG_WRITE			0x01
-#define FAULT_FLAG_MKWRITE			0x02
-#define FAULT_FLAG_ALLOW_RETRY			0x04
-#define FAULT_FLAG_RETRY_NOWAIT			0x08
-#define FAULT_FLAG_KILLABLE			0x10
-#define FAULT_FLAG_TRIED			0x20
-#define FAULT_FLAG_USER				0x40
-#define FAULT_FLAG_REMOTE			0x80
-#define FAULT_FLAG_INSTRUCTION			0x100
-#define FAULT_FLAG_INTERRUPTIBLE		0x200
+enum fault_flag {
+	FAULT_FLAG_WRITE =		1 << 0,
+	FAULT_FLAG_MKWRITE =		1 << 1,
+	FAULT_FLAG_ALLOW_RETRY =	1 << 2,
+	FAULT_FLAG_RETRY_NOWAIT =	1 << 3,
+	FAULT_FLAG_KILLABLE =		1 << 4,
+	FAULT_FLAG_TRIED =		1 << 5,
+	FAULT_FLAG_USER =		1 << 6,
+	FAULT_FLAG_REMOTE =		1 << 7,
+	FAULT_FLAG_INSTRUCTION =	1 << 8,
+	FAULT_FLAG_INTERRUPTIBLE =	1 << 9,
+};
 
 /*
  * The default fault flags that should be used by most of the
@@ -496,7 +497,7 @@ extern pgprot_t protection_map[16];
  * Return: true if the page fault allows retry and this is the first
  * attempt of the fault handling; false otherwise.
  */
-static inline bool fault_flag_allow_retry_first(unsigned int flags)
+static inline bool fault_flag_allow_retry_first(enum fault_flag flags)
 {
	return (flags & FAULT_FLAG_ALLOW_RETRY) &&
	    (!(flags & FAULT_FLAG_TRIED));
@@ -531,7 +532,7 @@ struct vm_fault {
		pgoff_t pgoff;		/* Logical page offset based on vma */
		unsigned long address;	/* Faulting virtual address */
	};
-	unsigned int flags;		/* FAULT_FLAG_xxx flags
+	enum fault_flag flags;		/* FAULT_FLAG_xxx flags
					 * XXX: should really be 'const' */
	pmd_t *pmd;			/* Pointer to pmd entry matching
					 * the 'address' */
-- cgit
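With the flags typed as enum fault_flag, the common arch fault-handler retry loop reads as below. This is a simplified sketch of the pattern (locking, signal handling, and the surrounding handler omitted), assuming the v5.13-era handle_mm_fault() signature:

	enum fault_flag flags = FAULT_FLAG_DEFAULT;
	vm_fault_t fault;

retry:
	fault = handle_mm_fault(vma, address, flags, regs);
	if (fault & VM_FAULT_RETRY) {
		/* Second attempt: ALLOW_RETRY stays set, TRIED is added,
		 * so fault_flag_allow_retry_first() is now false. */
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}
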
Miller" Cc: "Peter Zijlstra" Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/mm/init.c | 1 - arch/arc/mm/init.c | 1 - arch/arm/mm/init.c | 2 -- arch/arm64/mm/init.c | 2 -- arch/csky/mm/init.c | 1 - arch/h8300/mm/init.c | 2 -- arch/hexagon/mm/init.c | 1 - arch/ia64/mm/init.c | 1 - arch/m68k/mm/init.c | 1 - arch/microblaze/mm/init.c | 1 - arch/mips/loongson64/numa.c | 1 - arch/mips/mm/init.c | 1 - arch/mips/sgi-ip27/ip27-memory.c | 1 - arch/nds32/mm/init.c | 1 - arch/nios2/mm/init.c | 1 - arch/openrisc/mm/init.c | 2 -- arch/parisc/mm/init.c | 2 -- arch/powerpc/mm/mem.c | 1 - arch/riscv/mm/init.c | 1 - arch/s390/mm/init.c | 2 -- arch/sh/mm/init.c | 1 - arch/sparc/mm/init_32.c | 2 -- arch/sparc/mm/init_64.c | 1 - arch/um/kernel/mem.c | 1 - arch/x86/mm/init_32.c | 2 -- arch/x86/mm/init_64.c | 2 -- arch/xtensa/mm/init.c | 1 - include/linux/mm.h | 2 +- init/main.c | 1 + mm/page_alloc.c | 10 +++++----- 30 files changed, 7 insertions(+), 42 deletions(-) (limited to 'include/linux/mm.h') diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c index 3c42b3147fd6..a97650a618f1 100644 --- a/arch/alpha/mm/init.c +++ b/arch/alpha/mm/init.c @@ -282,5 +282,4 @@ mem_init(void) set_max_mapnr(max_low_pfn); high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); memblock_free_all(); - mem_init_print_info(NULL); } diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c index ce07e697916c..33832e36bdb7 100644 --- a/arch/arc/mm/init.c +++ b/arch/arc/mm/init.c @@ -194,7 +194,6 @@ void __init mem_init(void) { memblock_free_all(); highmem_init(); - mem_init_print_info(NULL); } #ifdef CONFIG_HIGHMEM diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 828a2561b229..7022b7b5c400 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -316,8 +316,6 @@ void __init mem_init(void) free_highpages(); - mem_init_print_info(NULL); - /* * Check boundaries twice: Some fundamental inconsistencies can * be detected at build time already. diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 470f92e6a542..ef031511ce29 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -491,8 +491,6 @@ void __init mem_init(void) /* this will put all unused low memory onto the freelists */ memblock_free_all(); - mem_init_print_info(NULL); - /* * Check boundaries twice: Some fundamental inconsistencies can be * detected at build time already. diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c index 894050a8ce09..bf2004aa811a 100644 --- a/arch/csky/mm/init.c +++ b/arch/csky/mm/init.c @@ -107,7 +107,6 @@ void __init mem_init(void) free_highmem_page(page); } #endif - mem_init_print_info(NULL); } void free_initmem(void) diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c index 1f3b345d68b9..f7bf4693e3b2 100644 --- a/arch/h8300/mm/init.c +++ b/arch/h8300/mm/init.c @@ -98,6 +98,4 @@ void __init mem_init(void) /* this will put all low memory onto the freelists */ memblock_free_all(); - - mem_init_print_info(NULL); } diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c index f2e6c868e477..f01e91e10d95 100644 --- a/arch/hexagon/mm/init.c +++ b/arch/hexagon/mm/init.c @@ -55,7 +55,6 @@ void __init mem_init(void) { /* No idea where this is actually declared. Seems to evade LXR. 
*/ memblock_free_all(); - mem_init_print_info(NULL); /* * To-Do: someone somewhere should wipe out the bootmem map diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 97a13eda81bf..064a967a7b6e 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -449,7 +449,6 @@ mem_init (void) set_max_mapnr(max_low_pfn); high_memory = __va(max_low_pfn * PAGE_SIZE); memblock_free_all(); - mem_init_print_info(NULL); /* * For fsyscall entrpoints with no light-weight handler, use the ordinary diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c index 14c1e541451c..1759ab875d47 100644 --- a/arch/m68k/mm/init.c +++ b/arch/m68k/mm/init.c @@ -153,5 +153,4 @@ void __init mem_init(void) /* this will put all memory onto the freelists */ memblock_free_all(); init_pointer_tables(); - mem_init_print_info(NULL); } diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index 05cf1fb3f5ff..ab55c70380a5 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -131,7 +131,6 @@ void __init mem_init(void) highmem_setup(); #endif - mem_init_print_info(NULL); mem_init_done = 1; } diff --git a/arch/mips/loongson64/numa.c b/arch/mips/loongson64/numa.c index 8315c871c435..fa9b4a487a47 100644 --- a/arch/mips/loongson64/numa.c +++ b/arch/mips/loongson64/numa.c @@ -178,7 +178,6 @@ void __init mem_init(void) high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT); memblock_free_all(); setup_zero_pages(); /* This comes from node 0 */ - mem_init_print_info(NULL); } /* All PCI device belongs to logical Node-0 */ diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 5cb73bf74a8b..c36358758969 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -467,7 +467,6 @@ void __init mem_init(void) memblock_free_all(); setup_zero_pages(); /* Setup zeroed pages. 
*/ mem_init_free_highmem(); - mem_init_print_info(NULL); #ifdef CONFIG_64BIT if ((unsigned long) &_text > (unsigned long) CKSEG0) diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c index 87bb6945ec25..6173684b5aaa 100644 --- a/arch/mips/sgi-ip27/ip27-memory.c +++ b/arch/mips/sgi-ip27/ip27-memory.c @@ -420,5 +420,4 @@ void __init mem_init(void) high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT); memblock_free_all(); setup_zero_pages(); /* This comes from node 0 */ - mem_init_print_info(NULL); } diff --git a/arch/nds32/mm/init.c b/arch/nds32/mm/init.c index fa86f7b2f416..f63f839738c4 100644 --- a/arch/nds32/mm/init.c +++ b/arch/nds32/mm/init.c @@ -191,7 +191,6 @@ void __init mem_init(void) /* this will put all low memory onto the freelists */ memblock_free_all(); - mem_init_print_info(NULL); pr_info("virtual kernel memory layout:\n" " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c index 61862dbb0e32..613fcaa5988a 100644 --- a/arch/nios2/mm/init.c +++ b/arch/nios2/mm/init.c @@ -71,7 +71,6 @@ void __init mem_init(void) /* this will put all memory onto the freelists */ memblock_free_all(); - mem_init_print_info(NULL); } void __init mmu_init(void) diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c index bf9b2310fc93..d5641198b90c 100644 --- a/arch/openrisc/mm/init.c +++ b/arch/openrisc/mm/init.c @@ -211,8 +211,6 @@ void __init mem_init(void) /* this will put all low memory onto the freelists */ memblock_free_all(); - mem_init_print_info(NULL); - printk("mem_init_done ...........................................\n"); mem_init_done = 1; return; diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 9ca4e4ff6895..591a4e939415 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -573,8 +573,6 @@ void __init mem_init(void) #endif parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START); - mem_init_print_info(NULL); - #if 0 /* * Do not expose the virtual kernel memory layout to userspace. diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 4e8ce6d85232..7e11c4cb08b8 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -312,7 +312,6 @@ void __init mem_init(void) (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1; #endif - mem_init_print_info(NULL); #ifdef CONFIG_PPC32 pr_info("Kernel virtual memory layout:\n"); #ifdef CONFIG_KASAN diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 067583ab1bd7..92e39cfa5227 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -102,7 +102,6 @@ void __init mem_init(void) high_memory = (void *)(__va(PFN_PHYS(max_low_pfn))); memblock_free_all(); - mem_init_print_info(NULL); print_vm_layout(); } diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 0e76b2127dc6..8ac710de1ab1 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -209,8 +209,6 @@ void __init mem_init(void) setup_zero_pages(); /* Setup zeroed pages. 
*/ cmma_init_nodat(); - - mem_init_print_info(NULL); } void free_initmem(void) diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 0db6919af8d3..168d7d4dd735 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -359,7 +359,6 @@ void __init mem_init(void) vsyscall_init(); - mem_init_print_info(NULL); pr_info("virtual kernel memory layout:\n" " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c index 6139c5700ccc..1e9f577f084d 100644 --- a/arch/sparc/mm/init_32.c +++ b/arch/sparc/mm/init_32.c @@ -292,8 +292,6 @@ void __init mem_init(void) map_high_region(start_pfn, end_pfn); } - - mem_init_print_info(NULL); } void sparc_flush_page_to_ram(struct page *page) diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 182bb7bdaa0a..e454f179cf5d 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -2520,7 +2520,6 @@ void __init mem_init(void) } mark_page_reserved(mem_map_zero); - mem_init_print_info(NULL); if (tlb_type == cheetah || tlb_type == cheetah_plus) cheetah_ecache_flush_init(); diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c index 9242dc91d751..9019ff5905b1 100644 --- a/arch/um/kernel/mem.c +++ b/arch/um/kernel/mem.c @@ -54,7 +54,6 @@ void __init mem_init(void) memblock_free_all(); max_low_pfn = totalram_pages(); max_pfn = max_low_pfn; - mem_init_print_info(NULL); kmalloc_ok = 1; } diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index da31c2635ee4..21ffb03f6c72 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -755,8 +755,6 @@ void __init mem_init(void) after_bootmem = 1; x86_init.hyper.init_after_bootmem(); - mem_init_print_info(NULL); - /* * Check boundaries twice: Some fundamental inconsistencies can * be detected at build time already. 
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 58ae2f746b3e..e527d829e1ed 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1351,8 +1351,6 @@ void __init mem_init(void) kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER); preallocate_vmalloc_pages(); - - mem_init_print_info(NULL); } #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index 2daeba9e454e..6a32b2cf2718 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -119,7 +119,6 @@ void __init mem_init(void) memblock_free_all(); - mem_init_print_info(NULL); pr_info("virtual kernel memory layout:\n" #ifdef CONFIG_KASAN " kasan : 0x%08lx - 0x%08lx (%5lu MB)\n" diff --git a/include/linux/mm.h b/include/linux/mm.h index 64b2e3a0b94d..011f43605807 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2360,7 +2360,7 @@ extern unsigned long free_reserved_area(void *start, void *end, int poison, const char *s); extern void adjust_managed_page_count(struct page *page, long count); -extern void mem_init_print_info(const char *str); +extern void mem_init_print_info(void); extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end); diff --git a/init/main.c b/init/main.c index ae96c79ad2d3..dd11bfd10ead 100644 --- a/init/main.c +++ b/init/main.c @@ -830,6 +830,7 @@ static void __init mm_init(void) report_meminit(); stack_depot_init(); mem_init(); + mem_init_print_info(); /* page_owner must be initialized after buddy is ready */ page_ext_init_flatmem_late(); kmem_cache_init(); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 6314de5387f5..b55073d0e84a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -7734,7 +7734,7 @@ unsigned long free_reserved_area(void *start, void *end, int poison, const char return pages; } -void __init mem_init_print_info(const char *str) +void __init mem_init_print_info(void) { unsigned long physpages, codesize, datasize, rosize, bss_size; unsigned long init_code_size, init_data_size; @@ -7773,17 +7773,17 @@ void __init mem_init_print_info(const char *str) #ifdef CONFIG_HIGHMEM ", %luK highmem" #endif - "%s%s)\n", + ")\n", nr_free_pages() << (PAGE_SHIFT - 10), physpages << (PAGE_SHIFT - 10), codesize >> 10, datasize >> 10, rosize >> 10, (init_data_size + init_code_size) >> 10, bss_size >> 10, (physpages - totalram_pages() - totalcma_pages) << (PAGE_SHIFT - 10), - totalcma_pages << (PAGE_SHIFT - 10), + totalcma_pages << (PAGE_SHIFT - 10) #ifdef CONFIG_HIGHMEM - totalhigh_pages() << (PAGE_SHIFT - 10), + , totalhigh_pages() << (PAGE_SHIFT - 10) #endif - str ? ", " : "", str ? str : ""); + ); } /** -- cgit
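The net effect on an architecture is that mem_init() shrinks to the actual freeing work, while the banner is printed once from generic code. Roughly, following the alpha hunk above:

void __init mem_init(void)
{
	set_max_mapnr(max_low_pfn);
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	memblock_free_all();
	/* mem_init_print_info() now runs from mm_init() in init/main.c,
	 * right after mem_init() returns; no per-arch call, no NULL
	 * argument. */
}

Since the optional string argument was unused by every caller, the void-argument mem_init_print_info() can also drop the "%s%s" suffix from its banner format, as the page_alloc.c hunk shows.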