author | Matthew Wilcox (Oracle) <[email protected]> | 2020-06-01 21:46:47 -0700
---|---|---
committer | Linus Torvalds <[email protected]> | 2020-06-02 10:59:06 -0700
commit | b0f31d78cbc191058e654c8eb062a864b6c9a7eb (patch) |
tree | 6c692ab6616a70c8c07edc0e03f21bbdc85c2336 |
parent | 8151b4c8bee43cea7a28cb0300123df90880e60c (diff) |
mm: move end_index check out of readahead loop
By reducing nr_to_read, we can eliminate this check from inside the loop.
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Reviewed-by: John Hubbard <[email protected]>
Reviewed-by: William Kucharski <[email protected]>
Cc: Chao Yu <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: Cong Wang <[email protected]>
Cc: Darrick J. Wong <[email protected]>
Cc: Dave Chinner <[email protected]>
Cc: Eric Biggers <[email protected]>
Cc: Gao Xiang <[email protected]>
Cc: Jaegeuk Kim <[email protected]>
Cc: Joseph Qi <[email protected]>
Cc: Junxiao Bi <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Zi Yan <[email protected]>
Cc: Johannes Thumshirn <[email protected]>
Cc: Miklos Szeredi <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
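The optimization described in the message above is easy to see outside the kernel: instead of re-testing the end-of-file bound on every pass through the page-allocation loop, clamp the number of pages to read once, up front. The following is a minimal userspace sketch of that transformation, not kernel code; the function names, the toy file size, and the reuse of a PAGE_SHIFT constant are illustrative assumptions.

```c
/*
 * Userspace sketch (hypothetical names, not kernel code) of the
 * transformation in this patch: hoist the end-of-file bound check
 * out of the loop by clamping the iteration count once.
 */
#include <stdio.h>

#define PAGE_SHIFT 12

/* Before: the bound is tested on every iteration. */
static unsigned long count_before(unsigned long index,
				  unsigned long nr_to_read,
				  unsigned long end_index)
{
	unsigned long i, pages = 0;

	for (i = 0; i < nr_to_read; i++) {
		if (index + i > end_index)	/* per-iteration check */
			break;
		pages++;
	}
	return pages;
}

/* After: clamp nr_to_read once; the loop body needs no bound check. */
static unsigned long count_after(unsigned long index,
				 unsigned long nr_to_read,
				 unsigned long end_index)
{
	unsigned long i, pages = 0;

	if (index > end_index)			/* start is already past EOF */
		return 0;
	/* Don't read past the page containing the last byte of the file */
	if (nr_to_read > end_index - index)
		nr_to_read = end_index - index + 1;

	for (i = 0; i < nr_to_read; i++)
		pages++;
	return pages;
}

int main(void)
{
	unsigned long long isize = 10000;	/* hypothetical file size */
	unsigned long end_index = (isize - 1) >> PAGE_SHIFT;

	/* Both variants agree; the second avoids the check in the loop. */
	printf("before: %lu, after: %lu\n",
	       count_before(0, 8, end_index),
	       count_after(0, 8, end_index));
	return 0;
}
```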
-rw-r--r-- | mm/readahead.c | 14 |
1 file changed, 8 insertions(+), 6 deletions(-)
```diff
diff --git a/mm/readahead.c b/mm/readahead.c
index d01531ef9f3c..998fdd23c0b1 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -167,8 +167,6 @@ void __do_page_cache_readahead(struct address_space *mapping,
 		unsigned long lookahead_size)
 {
 	struct inode *inode = mapping->host;
-	struct page *page;
-	unsigned long end_index;	/* The last page we want to read */
 	LIST_HEAD(page_pool);
 	loff_t isize = i_size_read(inode);
 	gfp_t gfp_mask = readahead_gfp_mask(mapping);
@@ -178,22 +176,26 @@ void __do_page_cache_readahead(struct address_space *mapping,
 		._index = index,
 	};
 	unsigned long i;
+	pgoff_t end_index;	/* The last page we want to read */

 	if (isize == 0)
 		return;

-	end_index = ((isize - 1) >> PAGE_SHIFT);
+	end_index = (isize - 1) >> PAGE_SHIFT;
+	if (index > end_index)
+		return;
+	/* Don't read past the page containing the last byte of the file */
+	if (nr_to_read > end_index - index)
+		nr_to_read = end_index - index + 1;

 	/*
	 * Preallocate as many pages as we will need.
	 */
 	for (i = 0; i < nr_to_read; i++) {
-		if (index + i > end_index)
-			break;
+		struct page *page = xa_load(&mapping->i_pages, index + i);

 		BUG_ON(index + i != rac._index + rac._nr_pages);

-		page = xa_load(&mapping->i_pages, index + i);
 		if (page && !xa_is_value(page)) {
 			/*
 			 * Page already present? Kick off the current batch of
```
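Besides removing the per-iteration check, the clamp enables two cleanups visible in the hunk: a read that starts beyond end_index now returns immediately instead of running an empty loop, and page becomes a loop-local variable initialized directly from xa_load(). end_index itself is retyped from unsigned long to the more precise pgoff_t.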