diff options
author | Vishal Moola (Oracle) <[email protected]> | 2022-11-01 10:53:24 -0700 |
---|---|---|
committer | Andrew Morton <[email protected]> | 2022-12-11 18:12:13 -0800 |
commit | 28965f0f8be62e1ed8296fe0240b5d5dc064b681 (patch) | |
tree | fda658fdc4bfe705bd9c9d6509548ebca74e7dbc | |
parent | 063aaad792eef49a11d7575dc9914b43c0fa3792 (diff) |
userfaultfd: replace lru_cache functions with folio_add functions
Replaces lru_cache_add() and lru_cache_add_inactive_or_unevictable() with
folio_add_lru() and folio_add_lru_vma(). This is in preparation for the
removal of lru_cache_add().
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Vishal Moola (Oracle) <[email protected]>
Cc: Matthew Wilcox (Oracle) <[email protected]>
Cc: Mike Kravetz <[email protected]>
Cc: Miklos Szeredi <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
-rw-r--r-- | mm/userfaultfd.c | 6 |
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 650ab6cfd5f4..b7a9479bece2 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -66,6 +66,7 @@ int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
 	bool vm_shared = dst_vma->vm_flags & VM_SHARED;
 	bool page_in_cache = page_mapping(page);
 	spinlock_t *ptl;
+	struct folio *folio;
 	struct inode *inode;
 	pgoff_t offset, max_off;
@@ -113,14 +114,15 @@ int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
 	if (!pte_none_mostly(*dst_pte))
 		goto out_unlock;

+	folio = page_folio(page);
 	if (page_in_cache) {
 		/* Usually, cache pages are already added to LRU */
 		if (newly_allocated)
-			lru_cache_add(page);
+			folio_add_lru(folio);
 		page_add_file_rmap(page, dst_vma, false);
 	} else {
 		page_add_new_anon_rmap(page, dst_vma, dst_addr);
-		lru_cache_add_inactive_or_unevictable(page, dst_vma);
+		folio_add_lru_vma(folio, dst_vma);
 	}

 	/*