authorNeilBrown <[email protected]>2022-03-22 14:38:54 -0700
committerLinus Torvalds <[email protected]>2022-03-22 15:57:00 -0700
commit9fd472af84abd6da15376353c2283b3df9497646 (patch)
tree7cf863fdd20550d447aa05b5488cb05d599f6a6a
parent84dacdbd5352bfef82423760fa2e8bffaeef9e05 (diff)
mm: improve cleanup when ->readpages doesn't process all pages
If ->readpages doesn't process all the pages, then it is best to act as
though they weren't requested so that a subsequent readahead can try
again.

So:

  - remove any 'ahead' pages from the page cache so they can be loaded
    with ->readahead() rather than multiple ->read()s

  - update the file_ra_state to reflect the reads that were actually
    submitted.

This allows ->readpages() to abort early due, e.g., to congestion, which
will then allow us to remove the inode_read_congested() test from
page_cache_async_ra().

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: NeilBrown <[email protected]>
Cc: Anna Schumaker <[email protected]>
Cc: Chao Yu <[email protected]>
Cc: Darrick J. Wong <[email protected]>
Cc: Ilya Dryomov <[email protected]>
Cc: Jaegeuk Kim <[email protected]>
Cc: Jan Kara <[email protected]>
Cc: Jeff Layton <[email protected]>
Cc: Jens Axboe <[email protected]>
Cc: Lars Ellenberg <[email protected]>
Cc: Miklos Szeredi <[email protected]>
Cc: Paolo Valente <[email protected]>
Cc: Philipp Reisner <[email protected]>
Cc: Ryusuke Konishi <[email protected]>
Cc: Trond Myklebust <[email protected]>
Cc: Wu Fengguang <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
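For illustration only (not part of this patch), here is a minimal sketch of how a
filesystem's ->readahead() implementation might stop early, e.g. when its backing
device is busy, and leave the remaining pages for the read_pages() cleanup below to
unwind. The my_fs_congested() and my_fs_submit_read() helpers are hypothetical and
assumed to be provided by the filesystem; my_fs_submit_read() is assumed to unlock
the page when its read completes.

#include <linux/pagemap.h>

/* Hypothetical helpers, for illustration only. */
bool my_fs_congested(struct address_space *mapping);
void my_fs_submit_read(struct page *page);	/* unlocks the page on I/O completion */

static void my_fs_readahead(struct readahead_control *rac)
{
	struct page *page;

	while ((page = readahead_page(rac))) {
		my_fs_submit_read(page);
		put_page(page);
		if (my_fs_congested(rac->mapping)) {
			/*
			 * Stop consuming pages.  Whatever is still queued in
			 * 'rac' stays locked and in the page cache; with this
			 * patch, read_pages() trims ->ra and deletes the
			 * untouched tail so a later readahead can retry it.
			 */
			return;
		}
	}
}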
-rw-r--r--	mm/readahead.c	19
1 file changed, 17 insertions(+), 2 deletions(-)
diff --git a/mm/readahead.c b/mm/readahead.c
index 73b2bc5302e0..8a97bd408cf6 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -104,7 +104,13 @@
  * for necessary resources (e.g. memory or indexing information) to
  * become available. Pages in the final ``async_size`` may be
  * considered less urgent and failure to read them is more acceptable.
- * They will eventually be read individually using ->readpage().
+ * In this case it is best to use delete_from_page_cache() to remove the
+ * pages from the page cache as is automatically done for pages that
+ * were not fetched with readahead_page(). This will allow a
+ * subsequent synchronous read ahead request to try them again. If they
+ * are left in the page cache, then they will be read individually using
+ * ->readpage().
+ *
  */
 
 #include <linux/kernel.h>
@@ -226,8 +232,17 @@ static void read_pages(struct readahead_control *rac, struct list_head *pages,
 	if (aops->readahead) {
 		aops->readahead(rac);
-		/* Clean up the remaining pages */
+		/*
+		 * Clean up the remaining pages.  The sizes in ->ra
+		 * may be used to size the next readahead, so make sure
+		 * they accurately reflect what happened.
+		 */
 		while ((page = readahead_page(rac))) {
+			rac->ra->size -= 1;
+			if (rac->ra->async_size > 0) {
+				rac->ra->async_size -= 1;
+				delete_from_page_cache(page);
+			}
 			unlock_page(page);
 			put_page(page);
 		}