author		Pavel Tatashin <[email protected]>	2021-05-04 18:38:49 -0700
committer	Linus Torvalds <[email protected]>	2021-05-05 11:27:26 -0700
commit		6e7f34ebb8d25d71ce7f4580ba3cbfc10b895580
tree		dfebaa6d9775b9c7ac2ce522ca26b20d10f211c1
parent		f0f4463837da17a89d965dcbe4e411629dbcf308
mm/gup: check for isolation errors
It is still possible that we pin movable CMA pages if there are
isolation errors and cma_page_list stays empty when we check again.
Check for isolation errors, and return success only when there are no
isolation errors and cma_page_list is empty after the check.
Because isolation errors are transient, we retry indefinitely.
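
The retry loop the message describes can be sketched in plain userspace C.
Everything below (the toy struct page, isolate_page(), migrate_pages(), and
the simulated failure rate) is a hypothetical stand-in for illustration, not
the kernel API:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_PAGES 8

/* Toy page model: pages still inside the CMA region must be moved out. */
struct page { bool in_cma; };

/* Pretend isolation fails transiently about 25% of the time. */
static bool isolate_page(struct page *p)
{
	(void)p;
	return rand() % 4 != 0;
}

/* In this toy model, migrating a page simply moves it out of CMA. */
static void migrate_pages(struct page **list, size_t n)
{
	for (size_t i = 0; i < n; i++)
		list[i]->in_cma = false;
}

/*
 * Mirror of the patch's control flow: succeed only when a full pass
 * isolates nothing and records no isolation errors; otherwise migrate
 * what was isolated and retry, because isolation errors are transient.
 */
static int check_and_migrate(struct page *pages, size_t nr_pages)
{
	struct page *isolated[NR_PAGES];
	size_t isolation_error_count, isolated_count;

check_again:
	isolation_error_count = 0;
	isolated_count = 0;

	for (size_t i = 0; i < nr_pages; i++) {
		if (!pages[i].in_cma)
			continue;
		if (isolate_page(&pages[i]))
			isolated[isolated_count++] = &pages[i];
		else
			isolation_error_count++;	/* transient, retry later */
	}

	/* Success only when nothing needed moving and nothing failed. */
	if (isolated_count == 0 && isolation_error_count == 0)
		return 0;

	if (isolated_count > 0)
		migrate_pages(isolated, isolated_count);

	goto check_again;
}

int main(void)
{
	struct page pages[NR_PAGES] = {
		{ true }, { false }, { true }, { false },
		{ true }, { false }, { true }, { false },
	};

	if (check_and_migrate(pages, NR_PAGES) == 0)
		puts("no CMA pages left, safe to long-term pin");
	return 0;
}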
Link: https://lkml.kernel.org/r/[email protected]
Fixes: 9a4e9f3b2d73 ("mm: update get_user_pages_longterm to migrate pages allocated from CMA region")
Signed-off-by: Pavel Tatashin <[email protected]>
Reviewed-by: Jason Gunthorpe <[email protected]>
Cc: Dan Williams <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Ira Weiny <[email protected]>
Cc: James Morris <[email protected]>
Cc: Jason Gunthorpe <[email protected]>
Cc: John Hubbard <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Mike Kravetz <[email protected]>
Cc: Oscar Salvador <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Sasha Levin <[email protected]>
Cc: Steven Rostedt (VMware) <[email protected]>
Cc: Tyler Hicks <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
-rw-r--r--	mm/gup.c	60
1 file changed, 34 insertions(+), 26 deletions(-)
@@ -1608,8 +1608,8 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
 					struct vm_area_struct **vmas,
 					unsigned int gup_flags)
 {
-	unsigned long i;
-	bool drain_allow = true;
+	unsigned long i, isolation_error_count;
+	bool drain_allow;
 	LIST_HEAD(cma_page_list);
 	long ret = nr_pages;
 	struct page *prev_head, *head;
@@ -1620,6 +1620,8 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
 
 check_again:
 	prev_head = NULL;
+	isolation_error_count = 0;
+	drain_allow = true;
 	for (i = 0; i < nr_pages; i++) {
 		head = compound_head(pages[i]);
 		if (head == prev_head)
@@ -1631,25 +1633,35 @@ check_again:
 		 * of the CMA zone if possible.
 		 */
 		if (is_migrate_cma_page(head)) {
-			if (PageHuge(head))
-				isolate_huge_page(head, &cma_page_list);
-			else {
+			if (PageHuge(head)) {
+				if (!isolate_huge_page(head, &cma_page_list))
+					isolation_error_count++;
+			} else {
 				if (!PageLRU(head) && drain_allow) {
 					lru_add_drain_all();
 					drain_allow = false;
 				}
 
-				if (!isolate_lru_page(head)) {
-					list_add_tail(&head->lru, &cma_page_list);
-					mod_node_page_state(page_pgdat(head),
-							    NR_ISOLATED_ANON +
-							    page_is_file_lru(head),
-							    thp_nr_pages(head));
+				if (isolate_lru_page(head)) {
+					isolation_error_count++;
+					continue;
 				}
+				list_add_tail(&head->lru, &cma_page_list);
+				mod_node_page_state(page_pgdat(head),
+						    NR_ISOLATED_ANON +
+						    page_is_file_lru(head),
+						    thp_nr_pages(head));
 			}
 		}
 	}
 
+	/*
+	 * If list is empty, and no isolation errors, means that all pages are
+	 * in the correct zone.
+	 */
+	if (list_empty(&cma_page_list) && !isolation_error_count)
+		return ret;
+
 	if (!list_empty(&cma_page_list)) {
 		/*
 		 * drop the above get_user_pages reference.
@@ -1669,23 +1681,19 @@ check_again:
 			return ret > 0 ? -ENOMEM : ret;
 		}
 
-		/*
-		 * We did migrate all the pages, Try to get the page references
-		 * again migrating any new CMA pages which we failed to isolate
-		 * earlier.
-		 */
-		ret = __get_user_pages_locked(mm, start, nr_pages,
-					      pages, vmas, NULL,
-					      gup_flags);
-
-		if (ret > 0) {
-			nr_pages = ret;
-			drain_allow = true;
-			goto check_again;
-		}
+		/* We unpinned pages before migration, pin them again */
+		ret = __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
+					      NULL, gup_flags);
+		if (ret <= 0)
+			return ret;
+		nr_pages = ret;
 	}
 
-	return ret;
+	/*
+	 * check again because pages were unpinned, and we also might have
+	 * had isolation errors and need more pages to migrate.
+	 */
+	goto check_again;
 }
 #else
 static long check_and_migrate_cma_pages(struct mm_struct *mm,