author     Mike Kravetz <[email protected]>      2023-10-16 19:55:49 -0700
committer  Andrew Morton <[email protected]>     2023-10-18 14:34:12 -0700
commit     30a89adf872d2e46323840964c95dc0ae3bb5843
tree       a9d5be1943a052141613e2636115ac3cfb0aa885
parent     5ef8f1b2b4d9bd02e4104b9255351fb9279b1b4e
hugetlb: check for hugetlb folio before vmemmap_restore
In commit d8f5f7e445f0 ("hugetlb: set hugetlb page flag before optimizing vmemmap") checks were added to print a warning if hugetlb_vmemmap_restore was called on a non-hugetlb page.  This was mostly due to ordering issues in the hugetlb page set up and tear down sequences.  One place missed was the routine dissolve_free_huge_page.

Naoya Horiguchi noted: "I saw that VM_WARN_ON_ONCE() in hugetlb_vmemmap_restore is triggered when memory_failure() is called on a free hugetlb page with vmemmap optimization disabled (the warning is not triggered if vmemmap optimization is enabled).  I think that we need check folio_test_hugetlb() before dissolve_free_huge_page() calls hugetlb_vmemmap_restore_folio()."

Perform the check as suggested by Naoya.

Link: https://lkml.kernel.org/r/20231017032140.GA3680@monkey
Fixes: d8f5f7e445f0 ("hugetlb: set hugetlb page flag before optimizing vmemmap")
Signed-off-by: Mike Kravetz <[email protected]>
Suggested-by: Naoya Horiguchi <[email protected]>
Tested-by: Naoya Horiguchi <[email protected]>
Cc: Anshuman Khandual <[email protected]>
Cc: Barry Song <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Joao Martins <[email protected]>
Cc: Matthew Wilcox (Oracle) <[email protected]>
Cc: Miaohe Lin <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Muchun Song <[email protected]>
Cc: Oscar Salvador <[email protected]>
Cc: Xiongchun Duan <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
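For readers without the surrounding mm/hugetlb.c context, the following is a minimal, self-contained userspace sketch of the guard pattern the patch applies: skip the restore step once the hugetlb flag has already been cleared, so the restore routine's warning cannot fire.  All names here (fake_folio, fake_vmemmap_restore, dissolve) are hypothetical stand-ins and do not reflect the real kernel interfaces.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for a folio carrying a hugetlb state flag. */
struct fake_folio {
	bool hugetlb;            /* models folio_test_hugetlb() */
	bool vmemmap_optimized;
};

/* Models the restore routine: warns if called on a non-hugetlb folio. */
static int fake_vmemmap_restore(struct fake_folio *f)
{
	if (!f->hugetlb) {
		fprintf(stderr, "WARN: restore called on non-hugetlb folio\n");
		return -1;
	}
	f->vmemmap_optimized = false;
	return 0;
}

/* Models the fixed dissolve path: only restore while the flag is set. */
static int dissolve(struct fake_folio *f)
{
	int rc = 0;

	if (f->hugetlb)
		rc = fake_vmemmap_restore(f);
	/* else: flag already cleared for a non-vmemmap-optimized folio */

	if (rc)
		return rc;	/* caller would re-add the folio and bail out */

	/* free the folio here */
	return 0;
}

int main(void)
{
	struct fake_folio cleared = { .hugetlb = false };

	return dissolve(&cleared);	/* no warning with the check in place */
}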
 mm/hugetlb.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3fb3a5232173..dfb4534834f5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2322,17 +2322,23 @@ retry:
 		 * need to adjust max_huge_pages if the page is not freed.
 		 * Attempt to allocate vmemmmap here so that we can take
 		 * appropriate action on failure.
+		 *
+		 * The folio_test_hugetlb check here is because
+		 * remove_hugetlb_folio will clear hugetlb folio flag for
+		 * non-vmemmap optimized hugetlb folios.
 		 */
-		rc = hugetlb_vmemmap_restore(h, &folio->page);
-		if (!rc) {
-			update_and_free_hugetlb_folio(h, folio, false);
-		} else {
-			spin_lock_irq(&hugetlb_lock);
-			add_hugetlb_folio(h, folio, false);
-			h->max_huge_pages++;
-			spin_unlock_irq(&hugetlb_lock);
-		}
+		if (folio_test_hugetlb(folio)) {
+			rc = hugetlb_vmemmap_restore(h, &folio->page);
+			if (rc) {
+				spin_lock_irq(&hugetlb_lock);
+				add_hugetlb_folio(h, folio, false);
+				h->max_huge_pages++;
+				goto out;
+			}
+		} else
+			rc = 0;
 
+		update_and_free_hugetlb_folio(h, folio, false);
 		return rc;
 	}
 out:
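Two structural points about the new flow may be worth noting.  The restore-failure path now takes hugetlb_lock and jumps to the function's existing out: label instead of unlocking inline; that only works because the out: path (not shown in this hunk) is presumably where hugetlb_lock is dropped before returning rc.  And by setting rc to 0 in the else branch, the already-unoptimized case and the successful-restore case converge on a single update_and_free_hugetlb_folio() call, so the folio is freed exactly once regardless of which branch ran.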