author     Kefeng Wang <[email protected]>    2023-10-18 22:07:56 +0800
committer  Andrew Morton <[email protected]>  2023-10-25 16:47:12 -0700
commit     ec1778807a8053d14cde7cfd75fbd66e0c7b9c9f (patch)
tree       8f2e82a3bc942b06336d028117a1214c3166d987
parent     0b201c3624ae9f58ebfff8484f304f3008fb01b8 (diff)
mm: mprotect: use a folio in change_pte_range()
Use a folio in change_pte_range() to save three compound_head() calls.
Since NUMA balancing now only handles normal and PMD-mapped pages, it
is enough to update the access time of the entire folio.
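For readers unfamiliar with the folio conversion pattern, the following is an
illustrative standalone model, not kernel code and not part of this patch: it
mimics how struct-page helpers each re-resolve the head page via
compound_head(), while folio helpers operate on an already-resolved head. All
types and helpers below are simplified stand-ins for the real kernel APIs.

    /*
     * Standalone sketch of the compound_head() saving. Build and run
     * with: cc -o folio_demo folio_demo.c && ./folio_demo
     */
    #include <stdio.h>

    struct page { struct page *head; int dirty; };
    struct folio { struct page page; };   /* a folio is a head page */

    static int head_lookups;              /* counts head resolutions */

    static struct page *compound_head(struct page *p)
    {
            head_lookups++;
            return p->head;
    }

    /* page-based helpers: each call resolves the head page again */
    static int PageDirty(struct page *p) { return compound_head(p)->dirty; }
    static int PageKsm(struct page *p)   { (void)compound_head(p); return 0; }

    /* folio-based helpers: the head was resolved once, up front */
    static struct folio *page_folio(struct page *p)
    {
            return (struct folio *)compound_head(p);
    }
    static int folio_test_dirty(struct folio *f) { return f->page.dirty; }
    static int folio_test_ksm(struct folio *f)   { (void)f; return 0; }

    int main(void)
    {
            struct page head = { .head = &head, .dirty = 1 };
            struct page tail = { .head = &head };

            head_lookups = 0;
            PageKsm(&tail);
            PageDirty(&tail);
            printf("page helpers:  %d head lookups\n", head_lookups);

            head_lookups = 0;
            struct folio *f = page_folio(&tail);
            folio_test_ksm(f);
            folio_test_dirty(f);
            printf("folio helpers: %d head lookup\n", head_lookups);
            return 0;
    }

In the model, two page-based tests cost two head lookups, while the folio path
pays for one lookup in page_folio() and none afterwards; the patch applies the
same idea to the checks in change_pte_range().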
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Kefeng Wang <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: Huang Ying <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Juri Lelli <[email protected]>
Cc: Matthew Wilcox (Oracle) <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Vincent Guittot <[email protected]>
Cc: Zi Yan <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
-rw-r--r--   mm/mprotect.c   16
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/mm/mprotect.c b/mm/mprotect.c
index f1dc8f8c84ef..81991102f785 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -114,7 +114,7 @@ static long change_pte_range(struct mmu_gather *tlb,
 			 * pages. See similar comment in change_huge_pmd.
 			 */
 			if (prot_numa) {
-				struct page *page;
+				struct folio *folio;
 				int nid;
 				bool toptier;
 
@@ -122,13 +122,14 @@ static long change_pte_range(struct mmu_gather *tlb,
 				if (pte_protnone(oldpte))
 					continue;
 
-				page = vm_normal_page(vma, addr, oldpte);
-				if (!page || is_zone_device_page(page) || PageKsm(page))
+				folio = vm_normal_folio(vma, addr, oldpte);
+				if (!folio || folio_is_zone_device(folio) ||
+				    folio_test_ksm(folio))
 					continue;
 
 				/* Also skip shared copy-on-write pages */
 				if (is_cow_mapping(vma->vm_flags) &&
-				    page_count(page) != 1)
+				    folio_ref_count(folio) != 1)
 					continue;
 
 				/*
@@ -136,14 +137,15 @@ static long change_pte_range(struct mmu_gather *tlb,
 				 * it cannot move them all from MIGRATE_ASYNC
 				 * context.
 				 */
-				if (page_is_file_lru(page) && PageDirty(page))
+				if (folio_is_file_lru(folio) &&
+				    folio_test_dirty(folio))
 					continue;
 
 				/*
 				 * Don't mess with PTEs if page is already on the node
 				 * a single-threaded process is running on.
 				 */
-				nid = page_to_nid(page);
+				nid = folio_nid(folio);
 				if (target_node == nid)
 					continue;
 				toptier = node_is_toptier(nid);
@@ -157,7 +159,7 @@ static long change_pte_range(struct mmu_gather *tlb,
 					continue;
 				if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
 				    !toptier)
-					xchg_page_access_time(page,
+					folio_xchg_access_time(folio,
 						jiffies_to_msecs(jiffies));
 			}
 