about summary refs log tree commit diff
diff options
context:
space:
mode:
authorJoonsoo Kim <[email protected]>2020-06-25 20:30:37 -0700
committerLinus Torvalds <[email protected]>2020-06-26 00:27:38 -0700
commit0076f029cb2906d32baf3bf4401ef09663071d16 (patch)
treef802cfcf449f056fa1a86dc3491c2bd0caf67f72
parentcb6868832ede5cd73b346ec11cf89814d26ff7c7 (diff)
mm/memory: fix IO cost for anonymous page
With synchronous IO swap device, swap-in is directly handled in fault code. Since IO cost notation isn't added there, with synchronous IO swap device, LRU balancing could be wrongly biased. Fix it to count it in fault code.

Link: http://lkml.kernel.org/r/[email protected]
Fixes: 314b57fb0460001 ("mm: balance LRU lists based on relative thrashing cache sizing")
Signed-off-by: Joonsoo Kim <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Minchan Kim <[email protected]>
Cc: Rik van Riel <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
-rw-r--r--mm/memory.c8
1 file changed, 8 insertions(+), 0 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 0e5b25c9b151..87ec87cdc1ff 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3146,6 +3146,14 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
goto out_page;
}
+ /*
+ * XXX: Move to lru_cache_add() when it
+ * supports new vs putback
+ */
+ spin_lock_irq(&page_pgdat(page)->lru_lock);
+ lru_note_cost_page(page);
+ spin_unlock_irq(&page_pgdat(page)->lru_lock);
+
lru_cache_add(page);
swap_readpage(page, true);
}