path: root/mm/truncate.c
Diffstat (limited to 'mm/truncate.c')
 -rw-r--r--  mm/truncate.c  119
 1 file changed, 61 insertions, 58 deletions
diff --git a/mm/truncate.c b/mm/truncate.c
index 0668cd340a46..7c304d2f0052 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -23,42 +23,28 @@
#include <linux/rmap.h>
#include "internal.h"
-/*
- * Regular page slots are stabilized by the page lock even without the tree
- * itself locked. These unlocked entries need verification under the tree
- * lock.
- */
-static inline void __clear_shadow_entry(struct address_space *mapping,
- pgoff_t index, void *entry)
-{
- XA_STATE(xas, &mapping->i_pages, index);
-
- xas_set_update(&xas, workingset_update_node);
- if (xas_load(&xas) != entry)
- return;
- xas_store(&xas, NULL);
-}
-
static void clear_shadow_entries(struct address_space *mapping,
- struct folio_batch *fbatch, pgoff_t *indices)
+ unsigned long start, unsigned long max)
{
- int i;
+ XA_STATE(xas, &mapping->i_pages, start);
+ struct folio *folio;
/* Handled by shmem itself, or for DAX we do nothing. */
if (shmem_mapping(mapping) || dax_mapping(mapping))
return;
- spin_lock(&mapping->host->i_lock);
- xa_lock_irq(&mapping->i_pages);
+ xas_set_update(&xas, workingset_update_node);
- for (i = 0; i < folio_batch_count(fbatch); i++) {
- struct folio *folio = fbatch->folios[i];
+ spin_lock(&mapping->host->i_lock);
+ xas_lock_irq(&xas);
+ /* Clear all shadow entries from start to max */
+ xas_for_each(&xas, folio, max) {
if (xa_is_value(folio))
- __clear_shadow_entry(mapping, indices[i], folio);
+ xas_store(&xas, NULL);
}
- xa_unlock_irq(&mapping->i_pages);
+ xas_unlock_irq(&xas);
if (mapping_shrinkable(mapping))
inode_add_lru(mapping->host);
spin_unlock(&mapping->host->i_lock);
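The rewritten clear_shadow_entries() is built on the standard XArray range walk. A minimal sketch of that pattern on a bare XArray follows (the bounds 2 and 5 are hypothetical; the real function additionally holds mapping->host->i_lock, registers workingset_update_node through xas_set_update(), and returns early for shmem and DAX mappings):

/* Sketch only: clear all value (shadow-style) entries in the range [2, 5]. */
static void example_range_clear(struct xarray *xa)
{
	XA_STATE(xas, xa, 2);		/* iteration starts at index 2 */
	void *entry;

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, 5) {	/* 5 is the inclusive maximum */
		if (xa_is_value(entry))	/* skip real folio pointers */
			xas_store(&xas, NULL);
	}
	xas_unlock_irq(&xas);
}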
@@ -68,54 +54,53 @@ static void clear_shadow_entries(struct address_space *mapping,
* Unconditionally remove exceptional entries. Usually called from truncate
* path. Note that the folio_batch may be altered by this function by removing
* exceptional entries similar to what folio_batch_remove_exceptionals() does.
+ * Please note that indices[] has entries in ascending order as guaranteed by
+ * either find_get_entries() or find_lock_entries().
*/
static void truncate_folio_batch_exceptionals(struct address_space *mapping,
struct folio_batch *fbatch, pgoff_t *indices)
{
+ XA_STATE(xas, &mapping->i_pages, indices[0]);
+ int nr = folio_batch_count(fbatch);
+ struct folio *folio;
int i, j;
- bool dax;
/* Handled by shmem itself */
if (shmem_mapping(mapping))
return;
- for (j = 0; j < folio_batch_count(fbatch); j++)
+ for (j = 0; j < nr; j++)
if (xa_is_value(fbatch->folios[j]))
break;
- if (j == folio_batch_count(fbatch))
+ if (j == nr)
return;
- dax = dax_mapping(mapping);
- if (!dax) {
- spin_lock(&mapping->host->i_lock);
- xa_lock_irq(&mapping->i_pages);
+ if (dax_mapping(mapping)) {
+ for (i = j; i < nr; i++) {
+ if (xa_is_value(fbatch->folios[i]))
+ dax_delete_mapping_entry(mapping, indices[i]);
+ }
+ goto out;
}
- for (i = j; i < folio_batch_count(fbatch); i++) {
- struct folio *folio = fbatch->folios[i];
- pgoff_t index = indices[i];
-
- if (!xa_is_value(folio)) {
- fbatch->folios[j++] = folio;
- continue;
- }
+ xas_set(&xas, indices[j]);
+ xas_set_update(&xas, workingset_update_node);
- if (unlikely(dax)) {
- dax_delete_mapping_entry(mapping, index);
- continue;
- }
+ spin_lock(&mapping->host->i_lock);
+ xas_lock_irq(&xas);
- __clear_shadow_entry(mapping, index, folio);
+ xas_for_each(&xas, folio, indices[nr-1]) {
+ if (xa_is_value(folio))
+ xas_store(&xas, NULL);
}
- if (!dax) {
- xa_unlock_irq(&mapping->i_pages);
- if (mapping_shrinkable(mapping))
- inode_add_lru(mapping->host);
- spin_unlock(&mapping->host->i_lock);
- }
- fbatch->nr = j;
+ xas_unlock_irq(&xas);
+ if (mapping_shrinkable(mapping))
+ inode_add_lru(mapping->host);
+ spin_unlock(&mapping->host->i_lock);
+out:
+ folio_batch_remove_exceptionals(fbatch);
}
/**
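The open-coded batch compaction removed above (copying non-value folios down and trimming fbatch->nr) is now left to folio_batch_remove_exceptionals(). Based on those removed lines, the compaction amounts to roughly the following (a sketch, not the mm/internal.h helper verbatim):

/* Sketch of the compaction formerly open-coded in the removed lines. */
static void example_compact_batch(struct folio_batch *fbatch)
{
	unsigned int i, j = 0;

	for (i = 0; i < folio_batch_count(fbatch); i++) {
		struct folio *folio = fbatch->folios[i];

		if (!xa_is_value(folio))	/* keep only real folios */
			fbatch->folios[j++] = folio;
	}
	fbatch->nr = j;
}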
@@ -166,7 +151,6 @@ static void truncate_cleanup_folio(struct folio *folio)
* Hence dirty accounting check is placed after invalidation.
*/
folio_cancel_dirty(folio);
- folio_clear_mappedtodisk(folio);
}
int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
@@ -478,11 +462,13 @@ unsigned long mapping_try_invalidate(struct address_space *mapping,
unsigned long ret;
unsigned long count = 0;
int i;
- bool xa_has_values = false;
folio_batch_init(&fbatch);
while (find_lock_entries(mapping, &index, end, &fbatch, indices)) {
- for (i = 0; i < folio_batch_count(&fbatch); i++) {
+ bool xa_has_values = false;
+ int nr = folio_batch_count(&fbatch);
+
+ for (i = 0; i < nr; i++) {
struct folio *folio = fbatch.folios[i];
/* We rely upon deletion not changing folio->index */
@@ -509,7 +495,7 @@ unsigned long mapping_try_invalidate(struct address_space *mapping,
}
if (xa_has_values)
- clear_shadow_entries(mapping, &fbatch, indices);
+ clear_shadow_entries(mapping, indices[0], indices[nr-1]);
folio_batch_remove_exceptionals(&fbatch);
folio_batch_release(&fbatch);
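Both converted call sites (this one and the identical change in invalidate_inode_pages2_range() below) now pass clear_shadow_entries() only the bounds of the current batch. A hypothetical caller-side sketch, relying on the guarantee that find_lock_entries()/find_get_entries() fill indices[] in ascending order:

/*
 * Hypothetical wrapper: indices[0] and indices[nr - 1] bound every
 * shadow entry collected in this batch, so one range walk sees them all.
 */
static void example_drop_shadows(struct address_space *mapping,
				 struct folio_batch *fbatch,
				 pgoff_t *indices)
{
	int nr = folio_batch_count(fbatch);

	if (nr)
		clear_shadow_entries(mapping, indices[0], indices[nr - 1]);
}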
@@ -605,7 +591,6 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
int ret = 0;
int ret2 = 0;
int did_range_unmap = 0;
- bool xa_has_values = false;
if (mapping_empty(mapping))
return 0;
@@ -613,7 +598,10 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
folio_batch_init(&fbatch);
index = start;
while (find_get_entries(mapping, &index, end, &fbatch, indices)) {
- for (i = 0; i < folio_batch_count(&fbatch); i++) {
+ bool xa_has_values = false;
+ int nr = folio_batch_count(&fbatch);
+
+ for (i = 0; i < nr; i++) {
struct folio *folio = fbatch.folios[i];
/* We rely upon deletion not changing folio->index */
@@ -659,7 +647,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
}
if (xa_has_values)
- clear_shadow_entries(mapping, &fbatch, indices);
+ clear_shadow_entries(mapping, indices[0], indices[nr-1]);
folio_batch_remove_exceptionals(&fbatch);
folio_batch_release(&fbatch);
@@ -797,6 +785,21 @@ void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
*/
if (folio_mkclean(folio))
folio_mark_dirty(folio);
+
+ /*
+ * The post-eof range of the folio must be zeroed before it is exposed
+ * to the file. Writeback normally does this, but since i_size has been
+ * increased we handle it here.
+ */
+ if (folio_test_dirty(folio)) {
+ unsigned int offset, end;
+
+ offset = from - folio_pos(folio);
+ end = min_t(unsigned int, to - folio_pos(folio),
+ folio_size(folio));
+ folio_zero_segment(folio, offset, end);
+ }
+
folio_unlock(folio);
folio_put(folio);
}
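For the zeroing added to pagecache_isize_extended(), a worked example of the offset arithmetic (the numbers are hypothetical; folio_pos(), folio_size() and folio_zero_segment() are the helpers used in the hunk above):

/*
 * Sketch: a 4KiB folio at file offset 8192, with i_size grown from 9000
 * to 10000.  offset = 9000 - 8192 = 808, end = min(10000 - 8192, 4096)
 * = 1808, so bytes [808, 1808) of the folio are zeroed.
 */
static void example_zero_post_eof(struct folio *folio, loff_t from, loff_t to)
{
	unsigned int offset = from - folio_pos(folio);
	unsigned int end = min_t(unsigned int, to - folio_pos(folio),
				 folio_size(folio));

	folio_zero_segment(folio, offset, end);
}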