author	Baolin Wang <[email protected]>	2024-08-12 15:42:05 +0800
committer	Andrew Morton <[email protected]>	2024-09-03 21:15:34 -0700
commit	fb72415938d109fe8cca339a4f1423f76ba213c5 (patch)
tree	70f4a8c767f2faef699a7e6e01cd8a6254b14d04
parent	6ea0d1ccb110387244e04637f28a1d2eda54e3fb (diff)
mm: filemap: use xa_get_order() to get the swap entry order
In the following patches, shmem will support swapping out large folios, which means shmem mappings may contain large-order swap entries. Use xa_get_order() to get the folio order of a shmem swap entry so that '*start' is updated correctly.

[[email protected]: use xa_get_order() to get the swap entry order]
Link: https://lkml.kernel.org/r/[email protected]
Link: https://lkml.kernel.org/r/6876d55145c1cc80e79df7884aa3a62e397b101d.1723434324.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Cc: Barry Song <[email protected]>
Cc: Chris Li <[email protected]>
Cc: Daniel Gomez <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: "Huang, Ying" <[email protected]>
Cc: Kefeng Wang <[email protected]>
Cc: Lance Yang <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Pankaj Raghav <[email protected]>
Cc: Ryan Roberts <[email protected]>
Cc: Yang Shi <[email protected]>
Cc: Zi Yan <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
-rw-r--r--	mm/filemap.c	41
1 file changed, 27 insertions(+), 14 deletions(-)
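For illustration only, here is a small standalone sketch (plain userspace C, not kernel code) of the '*start' arithmetic the updated find_get_entries() performs below. The index and order values are made up, and round_down() is open-coded to match the kernel's power-of-two definition; 'order' stands in for what xa_get_order() would return for a large-order swap entry.

/* Standalone sketch of the find_get_entries() '*start' update. */
#include <stdio.h>

#define round_down(x, y) ((x) & ~((y) - 1))	/* y must be a power of two */

int main(void)
{
	unsigned long index = 21;		/* indices[idx] of the last batched entry */
	unsigned int order = 3;			/* hypothetical xa_get_order() result */
	unsigned long nr = 1UL << order;	/* the entry spans 8 indices: 16..23 */
	unsigned long start;

	/* The old code assumed nr = 1 and would set start to 22, mid-entry. */
	start = round_down(index + nr, nr);	/* 21 + 8 = 29, rounded down to 24 */
	printf("next start = %lu\n", start);	/* first index past the large entry */
	return 0;
}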
diff --git a/mm/filemap.c b/mm/filemap.c
index fcfec2a78a30..fdaa1e5e3078 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2046,17 +2046,20 @@ unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
if (!folio_batch_add(fbatch, folio))
break;
}
- rcu_read_unlock();
if (folio_batch_count(fbatch)) {
- unsigned long nr = 1;
+ unsigned long nr;
int idx = folio_batch_count(fbatch) - 1;
folio = fbatch->folios[idx];
if (!xa_is_value(folio))
nr = folio_nr_pages(folio);
- *start = indices[idx] + nr;
+ else
+ nr = 1 << xa_get_order(&mapping->i_pages, indices[idx]);
+ *start = round_down(indices[idx] + nr, nr);
}
+ rcu_read_unlock();
+
return folio_batch_count(fbatch);
}
@@ -2088,10 +2091,17 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
rcu_read_lock();
while ((folio = find_get_entry(&xas, end, XA_PRESENT))) {
+ unsigned long base;
+ unsigned long nr;
+
if (!xa_is_value(folio)) {
- if (folio->index < *start)
+ nr = folio_nr_pages(folio);
+ base = folio->index;
+ /* Omit large folio which begins before the start */
+ if (base < *start)
goto put;
- if (folio_next_index(folio) - 1 > end)
+ /* Omit large folio which extends beyond the end */
+ if (base + nr - 1 > end)
goto put;
if (!folio_trylock(folio))
goto put;
@@ -2100,7 +2110,19 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
goto unlock;
VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index),
folio);
+ } else {
+ nr = 1 << xa_get_order(&mapping->i_pages, xas.xa_index);
+ base = xas.xa_index & ~(nr - 1);
+ /* Omit order>0 value which begins before the start */
+ if (base < *start)
+ continue;
+ /* Omit order>0 value which extends beyond the end */
+ if (base + nr - 1 > end)
+ break;
}
+
+ /* Update start now so that last update is correct on return */
+ *start = base + nr;
indices[fbatch->nr] = xas.xa_index;
if (!folio_batch_add(fbatch, folio))
break;
@@ -2112,15 +2134,6 @@ put:
}
rcu_read_unlock();
- if (folio_batch_count(fbatch)) {
- unsigned long nr = 1;
- int idx = folio_batch_count(fbatch) - 1;
-
- folio = fbatch->folios[idx];
- if (!xa_is_value(folio))
- nr = folio_nr_pages(folio);
- *start = indices[idx] + nr;
- }
return folio_batch_count(fbatch);
}
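The find_lock_entries() hunk above applies the same idea when deciding whether a large-order value entry lies inside [*start, end]. The following standalone sketch (again plain userspace C with made-up numbers; 'order' stands in for xa_get_order()'s result) walks through one case of that check.

/* Standalone sketch of the find_lock_entries() range check for a value entry. */
#include <stdio.h>

int main(void)
{
	unsigned long start = 20, end = 47;
	unsigned long index = 19;		/* xas.xa_index of the value entry */
	unsigned int order = 4;			/* hypothetical xa_get_order() result */
	unsigned long nr = 1UL << order;	/* the entry covers 16 indices */
	unsigned long base = index & ~(nr - 1);	/* aligned start of the entry: 16 */

	if (base < start)
		printf("skip: entry [%lu,%lu] begins before start %lu\n",
		       base, base + nr - 1, start);
	else if (base + nr - 1 > end)
		printf("stop: entry [%lu,%lu] extends beyond end %lu\n",
		       base, base + nr - 1, end);
	else
		printf("take entry, advance start to %lu\n", base + nr);
	return 0;
}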