author    Jiaqi Yan <[email protected]>  2024-06-26 05:08:15 +0000
committer Andrew Morton <[email protected]>  2024-07-04 18:05:59 -0700
commit    865319f772e6d5b8c932ff0abd7a9e57fe1c04e0
tree      2a0c555213c9687c9d177f18688b40380786f6f7
parent    c2fad56b3c12a5fc6ea7426d2e39459e85e5b55e
mm/memory-failure: refactor log format in soft offline code
Patch series "Userspace controls soft-offline pages", v6. Correctable memory errors are very common on servers with large amount of memory, and are corrected by ECC, but with two pain points to users: 1. Correction usually happens on the fly and adds latency overhead 2. Not-fully-proved theory states excessive correctable memory errors can develop into uncorrectable memory error. Soft offline is kernel's additional solution for memory pages having (excessive) corrected memory errors. Impacted page is migrated to healthy page if it is in use, then the original page is discarded for any future use. The actual policy on whether (and when) to soft offline should be maintained by userspace, especially in case of an 1G HugeTLB page. Soft-offline dissolves the HugeTLB page, either in-use or free, into chunks of 4K pages, reducing HugeTLB pool capacity by 1 hugepage. If userspace has not acknowledged such behavior, it may be surprised when later mmap hugepages MAP_FAILED due to lack of hugepages. In case of a transparent hugepage, it will be split into 4K pages as well; userspace will stop enjoying the transparent performance. In addition, discarding the entire 1G HugeTLB page only because of corrected memory errors sounds very costly and kernel better not doing under the hood. But today there are at least 2 such cases: 1. GHES driver sees both GHES_SEV_CORRECTED and CPER_SEC_ERROR_THRESHOLD_EXCEEDED after parsing CPER. 2. RAS Correctable Errors Collector counts correctable errors per PFN and when the counter for a PFN reaches threshold In both cases, userspace has no control of the soft offline performed by kernel's memory failure recovery. This patch series give userspace the control of softofflining any page: kernel only soft offlines raw page / transparent hugepage / HugeTLB hugepage if userspace has agreed to. The interface to userspace is a new sysctl called enable_soft_offline under /proc/sys/vm. By default enable_soft_line is 1 to preserve existing behavior in kernel. This patch (of 4): Logs from soft_offline_page and soft_offline_in_use_page have different formats than majority of the memory failure code: "Memory failure: 0x${pfn}: ${lower_case_message}" Convert them to the following format: "Soft offline: 0x${pfn}: ${lower_case_message}" No functional change in this commit. Link: https://lkml.kernel.org/r/[email protected] Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: Jiaqi Yan <[email protected]> Acked-by: Miaohe Lin <[email protected]> Reviewed-by: Lance Yang <[email protected]> Cc: David Rientjes <[email protected]> Cc: Frank van der Linden <[email protected]> Cc: Jane Chu <[email protected]> Cc: Jonathan Corbet <[email protected]> Cc: Muchun Song <[email protected]> Cc: Naoya Horiguchi <[email protected]> Cc: Oscar Salvador <[email protected]> Cc: Randy Dunlap <[email protected]> Cc: Shuah Khan <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
-rw-r--r--  mm/memory-failure.c  15
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index ab85b938b613..fa9671d2e7a7 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -2640,6 +2640,9 @@ unlock_mutex:
}
EXPORT_SYMBOL(unpoison_memory);
+#undef pr_fmt
+#define pr_fmt(fmt) "Soft offline: " fmt
+
static bool mf_isolate_folio(struct folio *folio, struct list_head *pagelist)
{
bool isolated = false;
@@ -2695,7 +2698,7 @@ static int soft_offline_in_use_page(struct page *page)
if (!huge && folio_test_large(folio)) {
if (try_to_split_thp_page(page, true)) {
- pr_info("soft offline: %#lx: thp split failed\n", pfn);
+ pr_info("%#lx: thp split failed\n", pfn);
return -EBUSY;
}
folio = page_folio(page);
@@ -2707,7 +2710,7 @@ static int soft_offline_in_use_page(struct page *page)
if (PageHWPoison(page)) {
folio_unlock(folio);
folio_put(folio);
- pr_info("soft offline: %#lx page already poisoned\n", pfn);
+ pr_info("%#lx: page already poisoned\n", pfn);
return 0;
}
@@ -2720,7 +2723,7 @@ static int soft_offline_in_use_page(struct page *page)
folio_unlock(folio);
if (ret) {
- pr_info("soft_offline: %#lx: invalidated\n", pfn);
+ pr_info("%#lx: invalidated\n", pfn);
page_handle_poison(page, false, true);
return 0;
}
@@ -2737,13 +2740,13 @@ static int soft_offline_in_use_page(struct page *page)
if (!list_empty(&pagelist))
putback_movable_pages(&pagelist);
- pr_info("soft offline: %#lx: %s migration failed %ld, type %pGp\n",
+ pr_info("%#lx: %s migration failed %ld, type %pGp\n",
pfn, msg_page[huge], ret, &page->flags);
if (ret > 0)
ret = -EBUSY;
}
} else {
- pr_info("soft offline: %#lx: %s isolation failed, page count %d, type %pGp\n",
+ pr_info("%#lx: %s isolation failed, page count %d, type %pGp\n",
pfn, msg_page[huge], page_count(page), &page->flags);
ret = -EBUSY;
}
@@ -2795,7 +2798,7 @@ int soft_offline_page(unsigned long pfn, int flags)
mutex_lock(&mf_mutex);
if (PageHWPoison(page)) {
- pr_info("%s: %#lx page already poisoned\n", __func__, pfn);
+ pr_info("%#lx: page already poisoned\n", pfn);
put_ref_page(pfn, flags);
mutex_unlock(&mf_mutex);
return 0;
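For readers unfamiliar with the pr_fmt() trick in the first hunk: pr_info() and the other pr_*() macros expand to printk() with pr_fmt() applied to the format string, so re-defining pr_fmt() midway through memory-failure.c changes the prefix of every later pr_*() call in the file. A minimal standalone sketch of the effect (the function below is hypothetical and not part of this patch):

#include <linux/printk.h>

/*
 * After this redefinition, every subsequent pr_*() call in this file
 * is prefixed with "Soft offline: ".
 */
#undef pr_fmt
#define pr_fmt(fmt) "Soft offline: " fmt

static void report_already_poisoned(unsigned long pfn)
{
	/* Emits: "Soft offline: 0x<pfn>: page already poisoned" */
	pr_info("%#lx: page already poisoned\n", pfn);
}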