author    Peter Xu <[email protected]>    2022-12-16 10:52:23 -0500
committer Andrew Morton <[email protected]>    2023-01-18 17:12:39 -0800
commit    eefc7fa53608920203a1402ecf7255ecfa8bb030 (patch)
tree      36b0442895fa08651d505d449c7eb1a3c96bd426
parent    7d049f3a03ea705522210d70b9d3e223ef86d663 (diff)
mm/hugetlb: make follow_hugetlb_page() safe to pmd unshare
Since follow_hugetlb_page() walks the pgtable, it needs the vma lock to make
sure the pgtable page will not be freed concurrently.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Peter Xu <[email protected]>
Acked-by: David Hildenbrand <[email protected]>
Reviewed-by: Mike Kravetz <[email protected]>
Reviewed-by: John Hubbard <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: James Houghton <[email protected]>
Cc: Jann Horn <[email protected]>
Cc: Miaohe Lin <[email protected]>
Cc: Muchun Song <[email protected]>
Cc: Nadav Amit <[email protected]>
Cc: Rik van Riel <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
-rw-r--r--	mm/hugetlb.c	7
1 file changed, 7 insertions, 0 deletions
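The discipline the patch applies is simple but easy to get wrong: take the vma lock in read mode before each pgtable walk step and drop it on every exit path (error, fault retry, continue, normal completion). Below is a minimal, self-contained userspace sketch of the same pattern, using a pthread read-write lock in place of the hugetlb vma lock; the names walker_ctx, walk_entries() and handle_entry() are illustrative only and are not kernel APIs.

/*
 * Userspace analogue of the locking pattern added to follow_hugetlb_page():
 * take a read lock before walking a structure that another thread may tear
 * down, and release it on *every* exit path of the loop body.
 *
 * walker_ctx, walk_entries() and handle_entry() are illustrative names;
 * pthread_rwlock_t stands in for the hugetlb vma lock.
 */
#include <pthread.h>
#include <stdio.h>

struct walker_ctx {
	pthread_rwlock_t lock;	/* stands in for the hugetlb vma lock */
	int *entries;		/* stands in for the page table */
	int nr_entries;
};

static int handle_entry(int entry)
{
	/* Pretend an odd entry needs a "fault" and the walk must retry. */
	return (entry % 2) ? -1 : 0;
}

static int walk_entries(struct walker_ctx *ctx)
{
	int i = 0;

	while (i < ctx->nr_entries) {
		/* Read lock held only around the actual walk step. */
		pthread_rwlock_rdlock(&ctx->lock);

		if (!ctx->entries) {
			/* Error path: drop the lock before bailing out. */
			pthread_rwlock_unlock(&ctx->lock);
			return -1;
		}

		if (handle_entry(ctx->entries[i])) {
			/*
			 * "Fault" path: drop the lock before doing the slow
			 * work, then retry the same entry, mirroring how the
			 * patch unlocks before calling hugetlb_fault().
			 */
			pthread_rwlock_unlock(&ctx->lock);
			ctx->entries[i] &= ~1;	/* resolve the fake fault */
			continue;
		}

		/* Normal path: also drop the lock before moving on. */
		pthread_rwlock_unlock(&ctx->lock);
		i++;
	}
	return 0;
}

int main(void)
{
	int data[] = { 0, 1, 2, 3 };
	struct walker_ctx ctx = { .entries = data, .nr_entries = 4 };

	pthread_rwlock_init(&ctx.lock, NULL);
	printf("walk returned %d\n", walk_entries(&ctx));
	pthread_rwlock_destroy(&ctx.lock);
	return 0;
}

As in the real patch, the lock is acquired per iteration rather than across the whole walk, so it can be dropped cleanly before any blocking work and re-taken when the loop continues.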
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 807edc1410e5..da4c37553c08 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6454,6 +6454,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
break;
}
+ hugetlb_vma_lock_read(vma);
/*
* Some archs (sparc64, sh*) have multiple pte_ts to
* each hugepage. We have to make sure we get the
@@ -6478,6 +6479,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
!hugetlbfs_pagecache_present(h, vma, vaddr)) {
if (pte)
spin_unlock(ptl);
+ hugetlb_vma_unlock_read(vma);
remainder = 0;
break;
}
@@ -6499,6 +6501,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (pte)
spin_unlock(ptl);
+ hugetlb_vma_unlock_read(vma);
+
if (flags & FOLL_WRITE)
fault_flags |= FAULT_FLAG_WRITE;
else if (unshare)
@@ -6561,6 +6565,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
remainder -= pages_per_huge_page(h);
i += pages_per_huge_page(h);
spin_unlock(ptl);
+ hugetlb_vma_unlock_read(vma);
continue;
}
@@ -6590,6 +6595,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (WARN_ON_ONCE(!try_grab_folio(pages[i], refs,
flags))) {
spin_unlock(ptl);
+ hugetlb_vma_unlock_read(vma);
remainder = 0;
err = -ENOMEM;
break;
@@ -6601,6 +6607,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
i += refs;
spin_unlock(ptl);
+ hugetlb_vma_unlock_read(vma);
}
*nr_pages = remainder;
/*