author	Oscar Salvador <[email protected]>	2024-05-28 22:53:23 +0200
committer	Andrew Morton <[email protected]>	2024-06-05 19:19:26 -0700
commit	8daf9c702ee7f825f0de8600abff764acfedea13 (patch)
tree	85e566f73d554ad7a37e1054b02b3f0ed7aa9e14
parent	c2dc78b86e0821ecf9a9d0c35dba2618279a5bb6 (diff)
mm/hugetlb: do not call vma_add_reservation upon ENOMEM
syzbot reported a splat [1] on __unmap_hugepage_range().  This is because vma_needs_reservation() can return -ENOMEM if allocate_file_region_entries() fails to allocate the file_region struct for the reservation.

Check for that and do not call vma_add_reservation() if that is the case, otherwise region_abort() and region_del() will see that we do not have any file_regions.

If we detect that vma_needs_reservation() returned -ENOMEM, we clear the hugetlb_restore_reserve flag as if this reservation was still consumed, so free_huge_folio() will not increment the resv count.

[1] https://lore.kernel.org/linux-mm/[email protected]/T/#ma5983bc1ab18a54910da83416b3f89f3c7ee43aa

Link: https://lkml.kernel.org/r/[email protected]
Fixes: df7a6d1f6405 ("mm/hugetlb: restore the reservation if needed")
Signed-off-by: Oscar Salvador <[email protected]>
Reported-and-tested-by: [email protected]
Closes: https://lore.kernel.org/linux-mm/[email protected]/
Cc: Breno Leitao <[email protected]>
Cc: Muchun Song <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
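As an aside, below is a minimal userspace C sketch (not kernel code) of the error-handling pattern the fix adopts: needs_reservation(), add_reservation() and clear_restore_reserve_flag() are hypothetical stand-ins for vma_needs_reservation(), vma_add_reservation() and folio_clear_hugetlb_restore_reserve(). A negative return means the bookkeeping allocation failed and the reservation must be treated as consumed; a positive return means the reservation should be restored; zero means there is nothing to do.

	/* Illustrative sketch only; the helpers below are hypothetical. */
	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in for vma_needs_reservation(): <0 on allocation failure,
	 * >0 when a reservation should be restored, 0 when none is needed. */
	static int needs_reservation(bool out_of_memory)
	{
		if (out_of_memory)
			return -ENOMEM;	/* file_region allocation failed */
		return 1;
	}

	/* Stand-in for vma_add_reservation(). */
	static void add_reservation(void)
	{
		printf("reservation restored\n");
	}

	/* Stand-in for folio_clear_hugetlb_restore_reserve(). */
	static void clear_restore_reserve_flag(void)
	{
		printf("restore_reserve cleared: act as if reservation was consumed\n");
	}

	int main(void)
	{
		for (int oom = 0; oom <= 1; oom++) {
			int rc = needs_reservation(oom);

			if (rc < 0)
				/* Allocation failed: do not touch the resv map,
				 * just make sure the free path does not bump the
				 * global reserve count. */
				clear_restore_reserve_flag();
			else if (rc)
				add_reservation();
		}
		return 0;
	}

The key point of the pattern is that the error case and the "no reservation needed" case must diverge: only a strictly positive return may proceed to touch the resv map.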
 mm/hugetlb.c | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6be78e7d4f6e..f35abff8be60 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5768,8 +5768,20 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
		 * do_exit() will not see it, and will keep the reservation
		 * forever.
		 */
-		if (adjust_reservation && vma_needs_reservation(h, vma, address))
-			vma_add_reservation(h, vma, address);
+		if (adjust_reservation) {
+			int rc = vma_needs_reservation(h, vma, address);
+
+			if (rc < 0)
+				/* Presumably allocate_file_region_entries failed
+				 * to allocate a file_region struct. Clear
+				 * hugetlb_restore_reserve so that global reserve
+				 * count will not be incremented by free_huge_folio.
+				 * Act as if we consumed the reservation.
+				 */
+				folio_clear_hugetlb_restore_reserve(page_folio(page));
+			else if (rc)
+				vma_add_reservation(h, vma, address);
+		}
 		tlb_remove_page_size(tlb, page, huge_page_size(h));
 		/*