author     Kefeng Wang <[email protected]>      2024-06-26 16:53:27 +0800
committer  Andrew Morton <[email protected]>    2024-07-06 11:53:20 -0700
commit     f00b295b9b61bb332b4f5951f479ab3aaeada5b8 (patch)
tree       43046f4702eb979f09599eff5ddf0b96d9b6baf6
parent     060913999d7a9e50c283fdb15253fc27974ddadc (diff)
fs: hugetlbfs: support recovery from poisoned folios in hugetlbfs_migrate_folio()
This is similar to __migrate_folio(): use folio_mc_copy() in HugeTLB folio
migration to avoid a panic when copying from a poisoned folio.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Kefeng Wang <[email protected]>
Cc: Alistair Popple <[email protected]>
Cc: Benjamin LaHaise <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Jane Chu <[email protected]>
Cc: Jérôme Glisse <[email protected]>
Cc: Jiaqi Yan <[email protected]>
Cc: Lance Yang <[email protected]>
Cc: Matthew Wilcox (Oracle) <[email protected]>
Cc: Miaohe Lin <[email protected]>
Cc: Muchun Song <[email protected]>
Cc: Naoya Horiguchi <[email protected]>
Cc: Oscar Salvador <[email protected]>
Cc: Tony Luck <[email protected]>
Cc: Vishal Moola (Oracle) <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
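For illustration only, here is a minimal sketch of the copy-before-freeze ordering this patch introduces. It is not the actual kernel code: hugetlb_migrate_sketch() is a hypothetical condensed wrapper, and the real work is split between migrate_huge_page_move_mapping() and hugetlbfs_migrate_folio() as shown in the diff below; the helpers it calls (folio_expected_refs(), folio_ref_count(), folio_mc_copy(), folio_migrate_flags()) are the ones appearing in that diff.

/*
 * Hypothetical condensed view of the post-patch flow (sketch only).
 */
static int hugetlb_migrate_sketch(struct address_space *mapping,
				  struct folio *dst, struct folio *src)
{
	int rc, expected_count = folio_expected_refs(mapping, src);

	/* Give up early if someone still holds unexpected references. */
	if (folio_ref_count(src) != expected_count)
		return -EAGAIN;

	/*
	 * Copy the contents before the refcount is frozen and the mapping
	 * is switched over.  folio_mc_copy() performs a machine-check-safe
	 * copy, so reading a poisoned source folio surfaces as an error
	 * return here instead of a kernel panic.
	 */
	rc = folio_mc_copy(dst, src);
	if (unlikely(rc))
		return rc;

	/* ... replace src with dst in mapping->i_pages under xas_lock_irq() ... */

	/*
	 * The data was already copied above, so the caller only transfers
	 * flags; the old folio_migrate_copy() call is dropped.
	 */
	folio_migrate_flags(dst, src);
	return MIGRATEPAGE_SUCCESS;
}

Doing the copy before folio_ref_freeze() and the xarray update means a failed copy can simply return an error, with no frozen refcount or mapping changes to undo.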
-rw-r--r--  fs/hugetlbfs/inode.c   2
-rw-r--r--  mm/migrate.c          10
2 files changed, 9 insertions, 3 deletions
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 9456e1d55540..ecad73a4f713 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -1128,7 +1128,7 @@ static int hugetlbfs_migrate_folio(struct address_space *mapping,
hugetlb_set_folio_subpool(src, NULL);
}
- folio_migrate_copy(dst, src);
+ folio_migrate_flags(dst, src);
return MIGRATEPAGE_SUCCESS;
}
diff --git a/mm/migrate.c b/mm/migrate.c
index 9dd5eb846d38..da1e115cc403 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -550,10 +550,16 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
struct folio *dst, struct folio *src)
{
XA_STATE(xas, &mapping->i_pages, folio_index(src));
- int expected_count;
+ int rc, expected_count = folio_expected_refs(mapping, src);
+
+ if (folio_ref_count(src) != expected_count)
+ return -EAGAIN;
+
+ rc = folio_mc_copy(dst, src);
+ if (unlikely(rc))
+ return rc;
xas_lock_irq(&xas);
- expected_count = folio_expected_refs(mapping, src);
if (!folio_ref_freeze(src, expected_count)) {
xas_unlock_irq(&xas);
return -EAGAIN;