author     David Hildenbrand <[email protected]>    2024-05-29 13:19:02 +0200
committer  Andrew Morton <[email protected]>  2024-07-03 19:30:17 -0700
commit     e4d970acfb1e7d1e83ce3101dc3c01eae44938e1
tree       f6f8ebc7435f85880b4c79929a8e0e80795a4d12
parent     43d746dc49bb4c82034fce01a92fe67344d664cf
mm/page_alloc: clear PageBuddy using __ClearPageBuddy() for bad pages
Let's stop using page_mapcount_reset() and clear PageBuddy using
__ClearPageBuddy() instead.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: David Hildenbrand <[email protected]>
Tested-by: Sergey Senozhatsky <[email protected]> [zram/zsmalloc workloads]
Cc: Hyeonggon Yoo <[email protected]>
Cc: Matthew Wilcox (Oracle) <[email protected]>
Cc: Mike Rapoport (IBM) <[email protected]>
Cc: Minchan Kim <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
 mm/page_alloc.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0576ac081a1f..8d7b4424b645 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -498,7 +498,8 @@ static void bad_page(struct page *page, const char *reason)
 	dump_stack();
 out:
 	/* Leave bad fields for debug, except PageBuddy could make trouble */
-	page_mapcount_reset(page); /* remove PageBuddy */
+	if (PageBuddy(page))
+		__ClearPageBuddy(page);
 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 }
@@ -1351,7 +1352,8 @@ static void check_new_page_bad(struct page *page)
 {
 	if (unlikely(page->flags & __PG_HWPOISON)) {
 		/* Don't complain about hwpoisoned pages */
-		page_mapcount_reset(page); /* remove PageBuddy */
+		if (PageBuddy(page))
+			__ClearPageBuddy(page);
 		return;
 	}
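
Background on why the old line could "remove PageBuddy" at all: in kernels of
this vintage the buddy marker lives in page->page_type, which shares storage
with the page's _mapcount, so writing -1 via page_mapcount_reset() wiped the
type word as a side effect. The patch replaces that indirect trick with an
explicit, targeted clear. The snippet below is a minimal userspace model of
that overlap, not kernel code; struct fake_page, the fake_* helpers, and the
constant values are illustrative assumptions rather than the kernel's real
definitions.

/*
 * Illustrative userspace model only -- NOT kernel code.  It mimics how
 * the buddy marker and _mapcount share one word in struct page, which is
 * why resetting the mapcount used to clear PageBuddy as a side effect.
 * The constants and helpers are simplified stand-ins, not the kernel's
 * real PAGE_TYPE_BASE/PG_buddy definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define FAKE_TYPE_BASE 0xf0000000u	/* assumed "has a type" pattern */
#define FAKE_PG_BUDDY  0x00000080u	/* assumed buddy bit */

struct fake_page {
	uint32_t page_type;	/* overlays _mapcount in the real struct page */
};

/* Page types use inverted bits: a cleared bit marks the type as set. */
static int fake_page_buddy(const struct fake_page *p)
{
	return (p->page_type & (FAKE_TYPE_BASE | FAKE_PG_BUDDY)) == FAKE_TYPE_BASE;
}

static void fake_set_page_buddy(struct fake_page *p)
{
	p->page_type &= ~FAKE_PG_BUDDY;
}

/* The patch's approach: undo only the buddy marker, leave the rest alone. */
static void fake_clear_page_buddy(struct fake_page *p)
{
	p->page_type |= FAKE_PG_BUDDY;
}

/* The old approach: write -1 over the whole word, which clears PageBuddy
 * but also destroys any other state kept there for debugging. */
static void fake_mapcount_reset(struct fake_page *p)
{
	p->page_type = (uint32_t)-1;
}

int main(void)
{
	/* Fresh page: _mapcount == -1, i.e. no page type set. */
	struct fake_page page = { .page_type = (uint32_t)-1 };

	fake_set_page_buddy(&page);
	printf("buddy before: %d\n", fake_page_buddy(&page));	/* prints 1 */

	if (fake_page_buddy(&page))
		fake_clear_page_buddy(&page);	/* mirrors the new hunks above */
	printf("buddy after:  %d\n", fake_page_buddy(&page));	/* prints 0 */

	(void)fake_mapcount_reset;	/* kept only for comparison */
	return 0;
}

Compiling and running this model should print "buddy before: 1" and
"buddy after: 0", mirroring what the if (PageBuddy(page)) /
__ClearPageBuddy(page) pair does in the hunks above while leaving the rest
of the word untouched for debugging.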