From 6a6fe9ebd571a4092b7d5c1f11e4e1e15d296fa5 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Fri, 9 Dec 2022 10:06:18 +0800 Subject: mm: swap: convert mark_page_lazyfree() to folio_mark_lazyfree() mark_page_lazyfree() and its callers have already been converted to use folios, so rename it to folio_mark_lazyfree() and make it take a folio argument instead of calling page_folio(). Link: https://lkml.kernel.org/r/20221209020618.190306-1-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Reviewed-by: Vishal Moola (Oracle) Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- mm/swap.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'mm/swap.c') diff --git a/mm/swap.c b/mm/swap.c index 70e2063ef43a..5e5eba186930 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -757,16 +757,14 @@ void deactivate_page(struct page *page) } /** - * mark_page_lazyfree - make an anon page lazyfree - * @page: page to deactivate + * folio_mark_lazyfree - make an anon folio lazyfree + * @folio: folio to deactivate * - * mark_page_lazyfree() moves @page to the inactive file list. - * This is done to accelerate the reclaim of @page. + * folio_mark_lazyfree() moves @folio to the inactive file list. + * This is done to accelerate the reclaim of @folio. */ -void mark_page_lazyfree(struct page *page) +void folio_mark_lazyfree(struct folio *folio) { - struct folio *folio = page_folio(page); - if (folio_test_lru(folio) && folio_test_anon(folio) && folio_test_swapbacked(folio) && !folio_test_swapcache(folio) && !folio_test_unevictable(folio)) { -- cgit v1.2.3-73-gaa49b From 5a9e34747c9f731bbb6b7fd7521c4fec0d840593 Mon Sep 17 00:00:00 2001 From: "Vishal Moola (Oracle)" Date: Wed, 21 Dec 2022 10:08:48 -0800 Subject: mm/swap: convert deactivate_page() to folio_deactivate() deactivate_page() has already been converted to use folios; this change converts it to take a folio argument instead of calling page_folio(). It also renames the function to folio_deactivate() to be more consistent with other folio functions.
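As an illustration of what these two conversions mean for callers (a hypothetical call site, not taken from this series): callers that still hold a struct page now do the page_folio() conversion themselves, while callers that already hold a folio pass it straight through. folio_deactivate(), folio_mark_lazyfree() and page_folio() are the interfaces touched by these patches; the example_* helpers below are made up for the sketch.

	#include <linux/mm.h>
	#include <linux/swap.h>

	/* Hypothetical caller that still has a struct page. */
	static void example_deactivate(struct page *page)
	{
		struct folio *folio = page_folio(page);	/* conversion now lives at the call site */

		folio_deactivate(folio);		/* was: deactivate_page(page) */
	}

	/* Hypothetical caller that already has a folio. */
	static void example_lazyfree(struct folio *folio)
	{
		folio_mark_lazyfree(folio);		/* was: mark_page_lazyfree(&folio->page) */
	}
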
[akpm@linux-foundation.org: fix left-over comments, per Yu Zhao] Link: https://lkml.kernel.org/r/20221221180848.20774-5-vishal.moola@gmail.com Signed-off-by: Vishal Moola (Oracle) Reviewed-by: Matthew Wilcox (Oracle) Reviewed-by: SeongJae Park Signed-off-by: Andrew Morton --- include/linux/swap.h | 2 +- mm/damon/paddr.c | 2 +- mm/madvise.c | 4 ++-- mm/page-writeback.c | 4 ++-- mm/swap.c | 14 ++++++-------- mm/vmscan.c | 2 +- 6 files changed, 13 insertions(+), 15 deletions(-) (limited to 'mm/swap.c') diff --git a/include/linux/swap.h b/include/linux/swap.h index 93f1cebd8545..87cecb8c0bdc 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -401,7 +401,7 @@ extern void lru_add_drain(void); extern void lru_add_drain_cpu(int cpu); extern void lru_add_drain_cpu_zone(struct zone *zone); extern void lru_add_drain_all(void); -extern void deactivate_page(struct page *page); +void folio_deactivate(struct folio *folio); void folio_mark_lazyfree(struct folio *folio); extern void swap_setup(void); diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index 884c8bf18b12..6334c99e5152 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -297,7 +297,7 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate( if (mark_accessed) folio_mark_accessed(folio); else - deactivate_page(&folio->page); + folio_deactivate(folio); folio_put(folio); applied += folio_nr_pages(folio); } diff --git a/mm/madvise.c b/mm/madvise.c index 575ebf0363b8..e407d335e614 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -416,7 +416,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, list_add(&folio->lru, &folio_list); } } else - deactivate_page(&folio->page); + folio_deactivate(folio); huge_unlock: spin_unlock(ptl); if (pageout) @@ -510,7 +510,7 @@ regular_folio: list_add(&folio->lru, &folio_list); } } else - deactivate_page(&folio->page); + folio_deactivate(folio); } arch_leave_lazy_mmu_mode(); diff --git a/mm/page-writeback.c b/mm/page-writeback.c index ad608ef2a243..41128ea9c997 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2846,11 +2846,11 @@ bool folio_mark_dirty(struct folio *folio) if (likely(mapping)) { /* - * readahead/lru_deactivate_page could remain + * readahead/folio_deactivate could remain * PG_readahead/PG_reclaim due to race with folio_end_writeback * About readahead, if the folio is written, the flags would be * reset. So no problem. - * About lru_deactivate_page, if the folio is redirtied, + * About folio_deactivate, if the folio is redirtied, * the flag will be reset. So no problem. but if the * folio is used by readahead it will confuse readahead * and make it restart the size rampup process. But it's diff --git a/mm/swap.c b/mm/swap.c index 5e5eba186930..e54e2a252e27 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -733,17 +733,15 @@ void deactivate_file_folio(struct folio *folio) } /* - * deactivate_page - deactivate a page - * @page: page to deactivate + * folio_deactivate - deactivate a folio + * @folio: folio to deactivate * - * deactivate_page() moves @page to the inactive list if @page was on the active - * list and was not an unevictable page. This is done to accelerate the reclaim - * of @page. + * folio_deactivate() moves @folio to the inactive list if @folio was on the + * active list and was not unevictable. This is done to accelerate the + * reclaim of @folio. 
*/ -void deactivate_page(struct page *page) +void folio_deactivate(struct folio *folio) { - struct folio *folio = page_folio(page); - if (folio_test_lru(folio) && !folio_test_unevictable(folio) && (folio_test_active(folio) || lru_gen_enabled())) { struct folio_batch *fbatch; diff --git a/mm/vmscan.c b/mm/vmscan.c index bd6637fcd8f9..aa8c252949da 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1920,7 +1920,7 @@ retry: !test_bit(PGDAT_DIRTY, &pgdat->flags))) { /* * Immediately reclaim when written back. - * Similar in principle to deactivate_page() + * Similar in principle to folio_deactivate() * except we already have the folio isolated * and know it's dirty */ -- cgit v1.2.3-73-gaa49b From 96f97c438f61ddba94117dcd1a1eb0aaafa22309 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Thu, 12 Jan 2023 12:39:31 +0000 Subject: mm: mlock: update the interface to use folios Update the mlock interface to accept folios rather than pages, bringing the interface in line with the internal implementation. munlock_vma_page() still requires a page_folio() conversion, however this is consistent with the existent mlock_vma_page() implementation and a product of rmap still dealing in pages rather than folios. Link: https://lkml.kernel.org/r/cba12777c5544305014bc0cbec56bb4cc71477d8.1673526881.git.lstoakes@gmail.com Signed-off-by: Lorenzo Stoakes Acked-by: Vlastimil Babka Cc: Christian Brauner Cc: Geert Uytterhoeven Cc: Hugh Dickins Cc: Joel Fernandes (Google) Cc: Jonathan Corbet Cc: Liam R. Howlett Cc: Matthew Wilcox Cc: Mike Rapoport (IBM) Cc: William Kucharski Signed-off-by: Andrew Morton --- mm/internal.h | 38 ++++++++++++++++++++++---------------- mm/migrate.c | 2 +- mm/mlock.c | 38 ++++++++++++++++++-------------------- mm/page_alloc.c | 2 +- mm/rmap.c | 4 ++-- mm/swap.c | 10 +++++----- 6 files changed, 49 insertions(+), 45 deletions(-) (limited to 'mm/swap.c') diff --git a/mm/internal.h b/mm/internal.h index 583e15357e09..973b48e8b1af 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -533,10 +533,9 @@ extern int mlock_future_check(struct mm_struct *mm, unsigned long flags, * should be called with vma's mmap_lock held for read or write, * under page table lock for the pte/pmd being added or removed. * - * mlock is usually called at the end of page_add_*_rmap(), - * munlock at the end of page_remove_rmap(); but new anon - * pages are managed by lru_cache_add_inactive_or_unevictable() - * calling mlock_new_page(). + * mlock is usually called at the end of page_add_*_rmap(), munlock at + * the end of page_remove_rmap(); but new anon folios are managed by + * folio_add_lru_vma() calling mlock_new_folio(). 
* * @compound is used to include pmd mappings of THPs, but filter out * pte mappings of THPs, which cannot be consistently counted: a pte @@ -565,18 +564,25 @@ static inline void mlock_vma_page(struct page *page, mlock_vma_folio(page_folio(page), vma, compound); } -void munlock_page(struct page *page); -static inline void munlock_vma_page(struct page *page, +void munlock_folio(struct folio *folio); + +static inline void munlock_vma_folio(struct folio *folio, struct vm_area_struct *vma, bool compound) { if (unlikely(vma->vm_flags & VM_LOCKED) && - (compound || !PageTransCompound(page))) - munlock_page(page); + (compound || !folio_test_large(folio))) + munlock_folio(folio); +} + +static inline void munlock_vma_page(struct page *page, + struct vm_area_struct *vma, bool compound) +{ + munlock_vma_folio(page_folio(page), vma, compound); } -void mlock_new_page(struct page *page); -bool need_mlock_page_drain(int cpu); -void mlock_page_drain_local(void); -void mlock_page_drain_remote(int cpu); +void mlock_new_folio(struct folio *folio); +bool need_mlock_drain(int cpu); +void mlock_drain_local(void); +void mlock_drain_remote(int cpu); extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma); @@ -665,10 +671,10 @@ static inline void mlock_vma_page(struct page *page, struct vm_area_struct *vma, bool compound) { } static inline void munlock_vma_page(struct page *page, struct vm_area_struct *vma, bool compound) { } -static inline void mlock_new_page(struct page *page) { } -static inline bool need_mlock_page_drain(int cpu) { return false; } -static inline void mlock_page_drain_local(void) { } -static inline void mlock_page_drain_remote(int cpu) { } +static inline void mlock_new_folio(struct folio *folio) { } +static inline bool need_mlock_drain(int cpu) { return false; } +static inline void mlock_drain_local(void) { } +static inline void mlock_drain_remote(int cpu) { } static inline void vunmap_range_noflush(unsigned long start, unsigned long end) { } diff --git a/mm/migrate.c b/mm/migrate.c index 98de7ce2b576..206fcdbe67f3 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -265,7 +265,7 @@ static bool remove_migration_pte(struct folio *folio, set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); } if (vma->vm_flags & VM_LOCKED) - mlock_page_drain_local(); + mlock_drain_local(); trace_remove_migration_pte(pvmw.address, pte_val(pte), compound_order(new)); diff --git a/mm/mlock.c b/mm/mlock.c index f8e8d30ab08a..9e9c8be58277 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -210,7 +210,7 @@ static void mlock_folio_batch(struct folio_batch *fbatch) folio_batch_reinit(fbatch); } -void mlock_page_drain_local(void) +void mlock_drain_local(void) { struct folio_batch *fbatch; @@ -221,7 +221,7 @@ void mlock_page_drain_local(void) local_unlock(&mlock_fbatch.lock); } -void mlock_page_drain_remote(int cpu) +void mlock_drain_remote(int cpu) { struct folio_batch *fbatch; @@ -231,7 +231,7 @@ void mlock_page_drain_remote(int cpu) mlock_folio_batch(fbatch); } -bool need_mlock_page_drain(int cpu) +bool need_mlock_drain(int cpu) { return folio_batch_count(&per_cpu(mlock_fbatch.fbatch, cpu)); } @@ -262,13 +262,12 @@ void mlock_folio(struct folio *folio) } /** - * mlock_new_page - mlock a newly allocated page not yet on LRU - * @page: page to be mlocked, either a normal page or a THP head. + * mlock_new_folio - mlock a newly allocated folio not yet on LRU + * @folio: folio to be mlocked, either normal or a THP head. 
*/ -void mlock_new_page(struct page *page) +void mlock_new_folio(struct folio *folio) { struct folio_batch *fbatch; - struct folio *folio = page_folio(page); int nr_pages = folio_nr_pages(folio); local_lock(&mlock_fbatch.lock); @@ -286,13 +285,12 @@ void mlock_new_page(struct page *page) } /** - * munlock_page - munlock a page - * @page: page to be munlocked, either a normal page or a THP head. + * munlock_folio - munlock a folio + * @folio: folio to be munlocked, either normal or a THP head. */ -void munlock_page(struct page *page) +void munlock_folio(struct folio *folio) { struct folio_batch *fbatch; - struct folio *folio = page_folio(page); local_lock(&mlock_fbatch.lock); fbatch = this_cpu_ptr(&mlock_fbatch.fbatch); @@ -314,7 +312,7 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr, struct vm_area_struct *vma = walk->vma; spinlock_t *ptl; pte_t *start_pte, *pte; - struct page *page; + struct folio *folio; ptl = pmd_trans_huge_lock(pmd, vma); if (ptl) { @@ -322,11 +320,11 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr, goto out; if (is_huge_zero_pmd(*pmd)) goto out; - page = pmd_page(*pmd); + folio = page_folio(pmd_page(*pmd)); if (vma->vm_flags & VM_LOCKED) - mlock_folio(page_folio(page)); + mlock_folio(folio); else - munlock_page(page); + munlock_folio(folio); goto out; } @@ -334,15 +332,15 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr, for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) { if (!pte_present(*pte)) continue; - page = vm_normal_page(vma, addr, *pte); - if (!page || is_zone_device_page(page)) + folio = vm_normal_folio(vma, addr, *pte); + if (!folio || folio_is_zone_device(folio)) continue; - if (PageTransCompound(page)) + if (folio_test_large(folio)) continue; if (vma->vm_flags & VM_LOCKED) - mlock_folio(page_folio(page)); + mlock_folio(folio); else - munlock_page(page); + munlock_folio(folio); } pte_unmap(start_pte); out: diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 88494e82843d..83be3b571fd0 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -8587,7 +8587,7 @@ static int page_alloc_cpu_dead(unsigned int cpu) struct zone *zone; lru_add_drain_cpu(cpu); - mlock_page_drain_remote(cpu); + mlock_drain_remote(cpu); drain_pages(cpu); /* diff --git a/mm/rmap.c b/mm/rmap.c index a079d9964b9c..073999f78adf 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1764,7 +1764,7 @@ discard: */ page_remove_rmap(subpage, vma, folio_test_hugetlb(folio)); if (vma->vm_flags & VM_LOCKED) - mlock_page_drain_local(); + mlock_drain_local(); folio_put(folio); } @@ -2105,7 +2105,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, */ page_remove_rmap(subpage, vma, folio_test_hugetlb(folio)); if (vma->vm_flags & VM_LOCKED) - mlock_page_drain_local(); + mlock_drain_local(); folio_put(folio); } diff --git a/mm/swap.c b/mm/swap.c index e54e2a252e27..42d67f9baa8c 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -562,7 +562,7 @@ void folio_add_lru_vma(struct folio *folio, struct vm_area_struct *vma) VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); if (unlikely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED)) - mlock_new_page(&folio->page); + mlock_new_folio(folio); else folio_add_lru(folio); } @@ -781,7 +781,7 @@ void lru_add_drain(void) local_lock(&cpu_fbatches.lock); lru_add_drain_cpu(smp_processor_id()); local_unlock(&cpu_fbatches.lock); - mlock_page_drain_local(); + mlock_drain_local(); } /* @@ -796,7 +796,7 @@ static void lru_add_and_bh_lrus_drain(void) lru_add_drain_cpu(smp_processor_id()); 
local_unlock(&cpu_fbatches.lock); invalidate_bh_lrus_cpu(); - mlock_page_drain_local(); + mlock_drain_local(); } void lru_add_drain_cpu_zone(struct zone *zone) @@ -805,7 +805,7 @@ void lru_add_drain_cpu_zone(struct zone *zone) lru_add_drain_cpu(smp_processor_id()); drain_local_pages(zone); local_unlock(&cpu_fbatches.lock); - mlock_page_drain_local(); + mlock_drain_local(); } #ifdef CONFIG_SMP @@ -828,7 +828,7 @@ static bool cpu_needs_drain(unsigned int cpu) folio_batch_count(&fbatches->lru_deactivate) || folio_batch_count(&fbatches->lru_lazyfree) || folio_batch_count(&fbatches->activate) || - need_mlock_page_drain(cpu) || + need_mlock_drain(cpu) || has_bh_in_lru(cpu, NULL); } -- cgit v1.2.3-73-gaa49b From c5792d9384113de4085dfbce6940e2a853debb67 Mon Sep 17 00:00:00 2001 From: "Vishal Moola (Oracle)" Date: Wed, 4 Jan 2023 13:14:48 -0800 Subject: filemap: remove find_get_pages_range_tag() All callers to find_get_pages_range_tag(), find_get_pages_tag(), pagevec_lookup_range_tag(), and pagevec_lookup_tag() have been removed. Link: https://lkml.kernel.org/r/20230104211448.4804-24-vishal.moola@gmail.com Signed-off-by: Vishal Moola (Oracle) Signed-off-by: Andrew Morton --- include/linux/pagemap.h | 10 --------- include/linux/pagevec.h | 8 ------- mm/filemap.c | 60 ------------------------------------------------- mm/swap.c | 10 --------- 4 files changed, 88 deletions(-) (limited to 'mm/swap.c') diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index bb3c1d51b1cb..9f1081683771 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -741,16 +741,6 @@ unsigned filemap_get_folios_contig(struct address_space *mapping, pgoff_t *start, pgoff_t end, struct folio_batch *fbatch); unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start, pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch); -unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index, - pgoff_t end, xa_mark_t tag, unsigned int nr_pages, - struct page **pages); -static inline unsigned find_get_pages_tag(struct address_space *mapping, - pgoff_t *index, xa_mark_t tag, unsigned int nr_pages, - struct page **pages) -{ - return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag, - nr_pages, pages); -} struct page *grab_cache_page_write_begin(struct address_space *mapping, pgoff_t index); diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h index 2a6f61a0c10a..f582f7213ea5 100644 --- a/include/linux/pagevec.h +++ b/include/linux/pagevec.h @@ -26,14 +26,6 @@ struct pagevec { }; void __pagevec_release(struct pagevec *pvec); -unsigned pagevec_lookup_range_tag(struct pagevec *pvec, - struct address_space *mapping, pgoff_t *index, pgoff_t end, - xa_mark_t tag); -static inline unsigned pagevec_lookup_tag(struct pagevec *pvec, - struct address_space *mapping, pgoff_t *index, xa_mark_t tag) -{ - return pagevec_lookup_range_tag(pvec, mapping, index, (pgoff_t)-1, tag); -} static inline void pagevec_init(struct pagevec *pvec) { diff --git a/mm/filemap.c b/mm/filemap.c index 85adbcf2d9a7..31bf18ec6d01 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2337,66 +2337,6 @@ out: } EXPORT_SYMBOL(filemap_get_folios_tag); -/** - * find_get_pages_range_tag - Find and return head pages matching @tag. 
- * @mapping: the address_space to search - * @index: the starting page index - * @end: The final page index (inclusive) - * @tag: the tag index - * @nr_pages: the maximum number of pages - * @pages: where the resulting pages are placed - * - * Like find_get_pages_range(), except we only return head pages which are - * tagged with @tag. @index is updated to the index immediately after the - * last page we return, ready for the next iteration. - * - * Return: the number of pages which were found. - */ -unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index, - pgoff_t end, xa_mark_t tag, unsigned int nr_pages, - struct page **pages) -{ - XA_STATE(xas, &mapping->i_pages, *index); - struct folio *folio; - unsigned ret = 0; - - if (unlikely(!nr_pages)) - return 0; - - rcu_read_lock(); - while ((folio = find_get_entry(&xas, end, tag))) { - /* - * Shadow entries should never be tagged, but this iteration - * is lockless so there is a window for page reclaim to evict - * a page we saw tagged. Skip over it. - */ - if (xa_is_value(folio)) - continue; - - pages[ret] = &folio->page; - if (++ret == nr_pages) { - *index = folio->index + folio_nr_pages(folio); - goto out; - } - } - - /* - * We come here when we got to @end. We take care to not overflow the - * index @index as it confuses some of the callers. This breaks the - * iteration when there is a page at index -1 but that is already - * broken anyway. - */ - if (end == (pgoff_t)-1) - *index = (pgoff_t)-1; - else - *index = end + 1; -out: - rcu_read_unlock(); - - return ret; -} -EXPORT_SYMBOL(find_get_pages_range_tag); - /* * CD/DVDs are error prone. When a medium error occurs, the driver may fail * a _large_ part of the i/o request. Imagine the worst scenario: diff --git a/mm/swap.c b/mm/swap.c index 42d67f9baa8c..5e4f92700c16 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -1115,16 +1115,6 @@ void folio_batch_remove_exceptionals(struct folio_batch *fbatch) fbatch->nr = j; } -unsigned pagevec_lookup_range_tag(struct pagevec *pvec, - struct address_space *mapping, pgoff_t *index, pgoff_t end, - xa_mark_t tag) -{ - pvec->nr = find_get_pages_range_tag(mapping, index, end, tag, - PAGEVEC_SIZE, pvec->pages); - return pagevec_count(pvec); -} -EXPORT_SYMBOL(pagevec_lookup_range_tag); - /* * Perform any setup for the swap system */ -- cgit v1.2.3-73-gaa49b From e0650a41f7d024b72669a2a2db846ef70281abd8 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 16 Jan 2023 19:28:27 +0000 Subject: mm: clean up mlock_page / munlock_page references in comments Change documentation and comments that refer to now-renamed functions. Link: https://lkml.kernel.org/r/20230116192827.2146732-5-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- Documentation/mm/unevictable-lru.rst | 30 ++++++++++++++++-------------- mm/memory-failure.c | 2 +- mm/swap.c | 4 ++-- 3 files changed, 19 insertions(+), 17 deletions(-) (limited to 'mm/swap.c') diff --git a/Documentation/mm/unevictable-lru.rst b/Documentation/mm/unevictable-lru.rst index 34b8b098c5bc..53e59433497a 100644 --- a/Documentation/mm/unevictable-lru.rst +++ b/Documentation/mm/unevictable-lru.rst @@ -298,7 +298,7 @@ treated as a no-op and mlock_fixup() simply returns. If the VMA passes some filtering as described in "Filtering Special VMAs" below, mlock_fixup() will attempt to merge the VMA with its neighbors or split off a subset of the VMA if the range does not cover the entire VMA. 
Any pages -already present in the VMA are then marked as mlocked by mlock_page() via +already present in the VMA are then marked as mlocked by mlock_folio() via mlock_pte_range() via walk_page_range() via mlock_vma_pages_range(). Before returning from the system call, do_mlock() or mlockall() will call @@ -373,20 +373,21 @@ Because of the VMA filtering discussed above, VM_LOCKED will not be set in any "special" VMAs. So, those VMAs will be ignored for munlock. If the VMA is VM_LOCKED, mlock_fixup() again attempts to merge or split off the -specified range. All pages in the VMA are then munlocked by munlock_page() via +specified range. All pages in the VMA are then munlocked by munlock_folio() via mlock_pte_range() via walk_page_range() via mlock_vma_pages_range() - the same function used when mlocking a VMA range, with new flags for the VMA indicating that it is munlock() being performed. -munlock_page() uses the mlock pagevec to batch up work to be done under -lru_lock by __munlock_page(). __munlock_page() decrements the page's -mlock_count, and when that reaches 0 it clears PG_mlocked and clears -PG_unevictable, moving the page from unevictable state to inactive LRU. +munlock_folio() uses the mlock pagevec to batch up work to be done +under lru_lock by __munlock_folio(). __munlock_folio() decrements the +folio's mlock_count, and when that reaches 0 it clears the mlocked flag +and clears the unevictable flag, moving the folio from unevictable state +to the inactive LRU. -But in practice that may not work ideally: the page may not yet have reached +But in practice that may not work ideally: the folio may not yet have reached "the unevictable LRU", or it may have been temporarily isolated from it. In those cases its mlock_count field is unusable and must be assumed to be 0: so -that the page will be rescued to an evictable LRU, then perhaps be mlocked +that the folio will be rescued to an evictable LRU, then perhaps be mlocked again later if vmscan finds it in a VM_LOCKED VMA. @@ -489,15 +490,16 @@ For each PTE (or PMD) being unmapped from a VMA, page_remove_rmap() calls munlock_vma_folio(), which calls munlock_folio() when the VMA is VM_LOCKED (unless it was a PTE mapping of a part of a transparent huge page). -munlock_page() uses the mlock pagevec to batch up work to be done under -lru_lock by __munlock_page(). __munlock_page() decrements the page's -mlock_count, and when that reaches 0 it clears PG_mlocked and clears -PG_unevictable, moving the page from unevictable state to inactive LRU. +munlock_folio() uses the mlock pagevec to batch up work to be done +under lru_lock by __munlock_folio(). __munlock_folio() decrements the +folio's mlock_count, and when that reaches 0 it clears the mlocked flag +and clears the unevictable flag, moving the folio from unevictable state +to the inactive LRU. -But in practice that may not work ideally: the page may not yet have reached +But in practice that may not work ideally: the folio may not yet have reached "the unevictable LRU", or it may have been temporarily isolated from it. In those cases its mlock_count field is unusable and must be assumed to be 0: so -that the page will be rescued to an evictable LRU, then perhaps be mlocked +that the folio will be rescued to an evictable LRU, then perhaps be mlocked again later if vmscan finds it in a VM_LOCKED VMA. 
diff --git a/mm/memory-failure.c b/mm/memory-failure.c index ba0bbfc074ee..38f84bff8bdf 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -2167,7 +2167,7 @@ try_again: } /* - * __munlock_pagevec may clear a writeback page's LRU flag without + * __munlock_folio() may clear a writeback page's LRU flag without * page_lock. We need wait writeback completion for this page or it * may trigger vfs BUG while evict inode. */ diff --git a/mm/swap.c b/mm/swap.c index 5e4f92700c16..2a51faa34e64 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -201,7 +201,7 @@ static void lru_add_fn(struct lruvec *lruvec, struct folio *folio) * Is an smp_mb__after_atomic() still required here, before * folio_evictable() tests the mlocked flag, to rule out the possibility * of stranding an evictable folio on an unevictable LRU? I think - * not, because __munlock_page() only clears the mlocked flag + * not, because __munlock_folio() only clears the mlocked flag * while the LRU lock is held. * * (That is not true of __page_cache_release(), and not necessarily @@ -216,7 +216,7 @@ static void lru_add_fn(struct lruvec *lruvec, struct folio *folio) folio_set_unevictable(folio); /* * folio->mlock_count = !!folio_test_mlocked(folio)? - * But that leaves __mlock_page() in doubt whether another + * But that leaves __mlock_folio() in doubt whether another * actor has already counted the mlock or not. Err on the * safe side, underestimate, let page reclaim fix it, rather * than leaving a page on the unevictable LRU indefinitely. -- cgit v1.2.3-73-gaa49b
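As a closing illustration (hypothetical code, not taken from any of the patches above): the fourth patch removes find_get_pages_range_tag()/pagevec_lookup_range_tag(), so tag-based walks are now written against filemap_get_folios_tag() and a folio_batch, whose declaration appears in that patch's pagemap.h hunk. The function name tag_walk_example() and the choice of PAGECACHE_TAG_DIRTY are made up for this sketch.

	#include <linux/fs.h>
	#include <linux/pagemap.h>
	#include <linux/pagevec.h>
	#include <linux/sched.h>

	/* Hypothetical tag-based walk using the folio_batch interface. */
	static void tag_walk_example(struct address_space *mapping,
				     pgoff_t index, pgoff_t end)
	{
		struct folio_batch fbatch;
		unsigned int i, nr;

		folio_batch_init(&fbatch);
		/* filemap_get_folios_tag() advances @index past the folios it returns. */
		while ((nr = filemap_get_folios_tag(mapping, &index, end,
						    PAGECACHE_TAG_DIRTY, &fbatch))) {
			for (i = 0; i < nr; i++) {
				struct folio *folio = fbatch.folios[i];

				/* per-folio work goes here, e.g. locking and writeback */
			}
			folio_batch_release(&fbatch);	/* drop the references taken by the lookup */
			cond_resched();
		}
	}

Here folio_batch_release() plays the role the old pagevec_release() did, dropping the reference the lookup took on each folio before the next batch is fetched.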