author	Huang Ying <[email protected]>	2017-09-06 16:24:33 -0700
committer	Linus Torvalds <[email protected]>	2017-09-06 17:27:29 -0700
commit	c4fa63092f216737b60c789968371d9960a598e5 (patch)
tree	c68329a01a3925a41522ab8ea6b3ae437d97185c
parent	cbc65df240c104bf540af1ad58595bf1eaa5ee10 (diff)
mm, swap: fix swap readahead marking
In the original implementation, it is possible that pages already in the
swap cache (not newly read ahead) could be marked as readahead pages. This
makes the swap readahead statistics wrong and also influences the swap
readahead algorithm.

Fix this by marking a page as a readahead page only if it is newly
allocated and read from the disk.

When testing with linpack, after this fix the swap readahead hit rate
increased from ~66% to ~86%.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: "Huang, Ying" <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Minchan Kim <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Shaohua Li <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Fengguang Wu <[email protected]>
Cc: Tim Chen <[email protected]>
Cc: Dave Hansen <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
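For illustration only, below is a minimal user-space C sketch of the accounting change described above. It is not kernel code: fake_page, lookup_or_alloc() and swap_ra_events are hypothetical stand-ins for the swap cache, the page_allocated out-parameter of __read_swap_cache_async(), and count_vm_event(SWAP_RA). The point it models is that only pages newly allocated by the readahead loop are flagged and counted, so pages already present in the swap cache no longer inflate the statistics.

/*
 * Illustrative user-space model only -- not kernel code.  fake_page,
 * lookup_or_alloc() and swap_ra_events are hypothetical stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
	bool in_cache;    /* already present in the "swap cache" */
	bool readahead;   /* models PageReadahead() */
};

static unsigned long swap_ra_events;  /* models count_vm_event(SWAP_RA) */

/*
 * Return the page for a slot and report whether it was newly allocated,
 * mirroring the page_allocated out-parameter of __read_swap_cache_async().
 */
static struct fake_page *lookup_or_alloc(struct fake_page *slot,
					 bool *page_allocated)
{
	*page_allocated = !slot->in_cache;
	slot->in_cache = true;
	return slot;
}

static void readahead_one(struct fake_page *slot)
{
	bool page_allocated;
	struct fake_page *page = lookup_or_alloc(slot, &page_allocated);

	/* Fixed behaviour: mark and count only pages this loop read in itself. */
	if (page_allocated) {
		page->readahead = true;
		swap_ra_events++;
	}
}

int main(void)
{
	/* Slot 0 is already cached and therefore must not be counted. */
	struct fake_page window[4] = { { .in_cache = true }, { 0 }, { 0 }, { 0 } };

	for (int i = 0; i < 4; i++)
		readahead_one(&window[i]);

	printf("SWAP_RA events: %lu (expected 3)\n", swap_ra_events);
	return 0;
}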
-rw-r--r--	mm/swap_state.c	18
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/mm/swap_state.c b/mm/swap_state.c
index d1bdb31cab13..a901afe9da61 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -498,7 +498,7 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
 	unsigned long start_offset, end_offset;
 	unsigned long mask;
 	struct blk_plug plug;
-	bool do_poll = true;
+	bool do_poll = true, page_allocated;
 
 	mask = swapin_nr_pages(offset) - 1;
 	if (!mask)
@@ -514,14 +514,18 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
 	blk_start_plug(&plug);
 	for (offset = start_offset; offset <= end_offset ; offset++) {
 		/* Ok, do the async read-ahead now */
-		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
-						gfp_mask, vma, addr, false);
+		page = __read_swap_cache_async(
+			swp_entry(swp_type(entry), offset),
+			gfp_mask, vma, addr, &page_allocated);
 		if (!page)
 			continue;
-		if (offset != entry_offset &&
-		    likely(!PageTransCompound(page))) {
-			SetPageReadahead(page);
-			count_vm_event(SWAP_RA);
+		if (page_allocated) {
+			swap_readpage(page, false);
+			if (offset != entry_offset &&
+			    likely(!PageTransCompound(page))) {
+				SetPageReadahead(page);
+				count_vm_event(SWAP_RA);
+			}
 		}
 		put_page(page);
 	}