author    David Hildenbrand <[email protected]>  2022-03-24 18:13:56 -0700
committer Linus Torvalds <[email protected]>      2022-03-24 19:06:51 -0700
commit    7f7609175ff2339a2cffa48ba2474ef928d7eafd (patch)
tree      9fcd1debb0cf0d2e0e815bb3cb8f289cb38a6339
parent    55c62fa7c53368a9011cd1276c001a1469078c6a (diff)
mm/huge_memory: remove stale locking logic from __split_huge_pmd()
Let's remove the stale logic that was required for reuse_swap_page().

[[email protected]: simplification, per Yang Shi]
  Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: David Hildenbrand <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Don Dutile <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Jan Kara <[email protected]>
Cc: Jann Horn <[email protected]>
Cc: Jason Gunthorpe <[email protected]>
Cc: John Hubbard <[email protected]>
Cc: Kirill A. Shutemov <[email protected]>
Cc: Liang Zhang <[email protected]>
Cc: Matthew Wilcox (Oracle) <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Mike Kravetz <[email protected]>
Cc: Mike Rapoport <[email protected]>
Cc: Nadav Amit <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: Peter Xu <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Roman Gushchin <[email protected]>
Cc: Shakeel Butt <[email protected]>
Cc: Yang Shi <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
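After this change, __split_huge_pmd() no longer touches the folio lock at all: it takes the PMD lock and, if the entry is a huge PMD, a devmap PMD, or a PMD migration entry, splits it in place. Below is a minimal sketch of the resulting flow, reconstructed from the hunks further down rather than quoted verbatim from mm/huge_memory.c; the caller-folio sanity checks and the mmu_notifier end handling are abbreviated.

    /* Sketch of the simplified flow after this patch -- reconstructed from the
     * diff below, not a verbatim copy of mm/huge_memory.c. */
    void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                          unsigned long address, bool freeze, struct folio *folio)
    {
            spinlock_t *ptl;
            struct mmu_notifier_range range;

            mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
                                    address & HPAGE_PMD_MASK,
                                    (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
            mmu_notifier_invalidate_range_start(&range);
            ptl = pmd_lock(vma->vm_mm, pmd);

            /* ... a caller-supplied folio is sanity-checked here; on a mismatch
             * the function takes the "goto out" path visible in the hunk below ... */

            /*
             * No folio lock, no trylock/drop/relock/pmd_same() retry: with
             * reuse_swap_page() gone, nothing needs a stable mapcount here,
             * so the PMD is split directly under the PMD spinlock.
             */
            if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
                is_pmd_migration_entry(*pmd))
                    __split_huge_pmd_locked(vma, pmd, range.start, freeze);

    out:
            spin_unlock(ptl);
            /* ... mmu_notifier invalidate_range end handling elided ... */
    }

The only locking left around the split is the PMD spinlock itself.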
-rw-r--r--  mm/huge_memory.c  40
1 file changed, 4 insertions(+), 36 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index cc580d90117b..2fe38212e07c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2133,8 +2133,6 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 {
         spinlock_t *ptl;
         struct mmu_notifier_range range;
-        bool do_unlock_folio = false;
-        pmd_t _pmd;
 
         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
                                 address & HPAGE_PMD_MASK,
@@ -2153,42 +2151,12 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                         goto out;
         }
 
-repeat:
-        if (pmd_trans_huge(*pmd)) {
-                if (!folio) {
-                        folio = page_folio(pmd_page(*pmd));
-                        /*
-                         * An anonymous page must be locked, to ensure that a
-                         * concurrent reuse_swap_page() sees stable mapcount;
-                         * but reuse_swap_page() is not used on shmem or file,
-                         * and page lock must not be taken when zap_pmd_range()
-                         * calls __split_huge_pmd() while i_mmap_lock is held.
-                         */
-                        if (folio_test_anon(folio)) {
-                                if (unlikely(!folio_trylock(folio))) {
-                                        folio_get(folio);
-                                        _pmd = *pmd;
-                                        spin_unlock(ptl);
-                                        folio_lock(folio);
-                                        spin_lock(ptl);
-                                        if (unlikely(!pmd_same(*pmd, _pmd))) {
-                                                folio_unlock(folio);
-                                                folio_put(folio);
-                                                folio = NULL;
-                                                goto repeat;
-                                        }
-                                        folio_put(folio);
-                                }
-                                do_unlock_folio = true;
-                        }
-                }
-        } else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)))
-                goto out;
-        __split_huge_pmd_locked(vma, pmd, range.start, freeze);
+        if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
+            is_pmd_migration_entry(*pmd))
+                __split_huge_pmd_locked(vma, pmd, range.start, freeze);
+
 out:
         spin_unlock(ptl);
-        if (do_unlock_folio)
-                folio_unlock(folio);
         /*
          * No need to double call mmu_notifier->invalidate_range() callback.
          * They are 3 cases to consider inside __split_huge_pmd_locked():
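The block removed above is an instance of the standard pattern for taking a sleeping lock while already holding a spinlock: trylock first, and on failure pin the object, drop the spinlock, sleep on the lock, reacquire the spinlock, and re-validate (here via pmd_same()) before retrying. The following is a hypothetical userspace illustration of that retry pattern using POSIX threads; the names ptl, folio_lock and pmd_state are invented stand-ins for this example, not kernel APIs.

    /* Toy model of the removed "trylock, else drop/relock/re-validate" dance.
     * Build with: cc -pthread demo.c */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_spinlock_t ptl;                 /* stands in for the PMD lock  */
    static pthread_mutex_t folio_lock = PTHREAD_MUTEX_INITIALIZER; /* page lock   */
    static int pmd_state;                          /* stands in for *pmd          */

    static void split_with_folio_lock(void)
    {
            int cached;

            pthread_spin_lock(&ptl);
    retry:
            if (pthread_mutex_trylock(&folio_lock) != 0) {
                    /* The kernel code also pins the folio (folio_get/folio_put)
                     * before dropping ptl; this toy model omits refcounting. */
                    cached = pmd_state;              /* _pmd = *pmd              */
                    pthread_spin_unlock(&ptl);       /* spin_unlock(ptl)         */
                    pthread_mutex_lock(&folio_lock); /* folio_lock(folio)        */
                    pthread_spin_lock(&ptl);         /* spin_lock(ptl)           */
                    if (pmd_state != cached) {       /* !pmd_same(*pmd, _pmd)    */
                            pthread_mutex_unlock(&folio_lock);
                            goto retry;              /* goto repeat              */
                    }
            }
            /* ... split under both locks ... */
            pmd_state++;
            pthread_spin_unlock(&ptl);
            pthread_mutex_unlock(&folio_lock);
    }

    int main(void)
    {
            pthread_spin_init(&ptl, PTHREAD_PROCESS_PRIVATE);
            split_with_folio_lock();
            printf("state=%d\n", pmd_state);
            return 0;
    }

If the cached state no longer matches once both locks are held again, the whole sequence restarts, just as the removed kernel code jumped back to the repeat: label; the patch deletes all of this because, without reuse_swap_page(), no reader needs the folio lock held across the split.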