author    Alex Shi <[email protected]>  2020-12-15 14:20:50 -0800
committer Linus Torvalds <[email protected]>  2020-12-15 14:48:03 -0800
commit    75cc3c9161cd95f43ebf6c6a938d4d98ab195bbd (patch)
tree      b70616e1da95adbaefc152db0ff6cf7d08722749
parent    c7c7b80c39a18d99a0a34534ed8b82e020da6131 (diff)
mm/lru: move lock into lru_note_cost
We have to move lru_lock into lru_note_cost because it cycles up the memcg tree, in preparation for the future replacement of the per-node lru_lock with a per-lruvec one. It's a bit ugly and may cost a bit more locking, but the benefit of finer-grained per-memcg locking should cover the loss.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Alex Shi <[email protected]>
Acked-by: Hugh Dickins <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Alexander Duyck <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: Andrey Ryabinin <[email protected]>
Cc: "Chen, Rong A" <[email protected]>
Cc: Daniel Jordan <[email protected]>
Cc: "Huang, Ying" <[email protected]>
Cc: Jann Horn <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Kirill A. Shutemov <[email protected]>
Cc: Kirill A. Shutemov <[email protected]>
Cc: Konstantin Khlebnikov <[email protected]>
Cc: Matthew Wilcox (Oracle) <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Mika Penttilä <[email protected]>
Cc: Minchan Kim <[email protected]>
Cc: Shakeel Butt <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Vladimir Davydov <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Wei Yang <[email protected]>
Cc: Yang Shi <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
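In other words, the locking contract of lru_note_cost() flips: before this patch the caller had to hold pgdat->lru_lock across the call; after it, the function locks internally, once per memcg level, and must be entered with the lock not held. A condensed sketch of the two call-site patterns (illustrative only, not the exact kernel code):

	/* Before: the caller holds the node-wide lock across the call. */
	spin_lock_irq(&pgdat->lru_lock);
	lru_note_cost(lruvec, file, nr_pages);
	spin_unlock_irq(&pgdat->lru_lock);

	/* After: lru_note_cost() takes and drops the lock itself at each
	 * level of the hierarchy walk, so the caller must NOT already
	 * hold lru_lock. */
	lru_note_cost(lruvec, file, nr_pages);

Each hunk below is one side of this flip: mm/swap.c adds the internal locking, while mm/vmscan.c and mm/workingset.c strip it from the callers.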
-rw-r--r--  mm/swap.c        3
-rw-r--r--  mm/vmscan.c      4
-rw-r--r--  mm/workingset.c  2
3 files changed, 4 insertions, 5 deletions
diff --git a/mm/swap.c b/mm/swap.c
index b4ca5b965838..c3187d04f9b2 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -268,7 +268,9 @@ void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
 {
 	do {
 		unsigned long lrusize;
+		struct pglist_data *pgdat = lruvec_pgdat(lruvec);
 
+		spin_lock_irq(&pgdat->lru_lock);
 		/* Record cost event */
 		if (file)
 			lruvec->file_cost += nr_pages;
@@ -292,6 +294,7 @@ void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
 			lruvec->file_cost /= 2;
 			lruvec->anon_cost /= 2;
 		}
+		spin_unlock_irq(&pgdat->lru_lock);
 	} while ((lruvec = parent_lruvec(lruvec)));
 }
 
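Taking the lock inside the loop body means each iteration locks exactly the lruvec level it is updating, which is what makes the planned per-lruvec lock a drop-in replacement. A hedged sketch of where the series is heading; note that lru_lock as a member of struct lruvec is an assumption about the follow-up work, not something this patch introduces:

	/* Sketch only: the future per-lruvec form this change enables. */
	void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
	{
		do {
			/* lock the one lruvec being touched, not the whole node */
			spin_lock_irq(&lruvec->lru_lock);
			if (file)
				lruvec->file_cost += nr_pages;
			else
				lruvec->anon_cost += nr_pages;
			spin_unlock_irq(&lruvec->lru_lock);
		} while ((lruvec = parent_lruvec(lruvec)));
	}

Until that lands, every level of the walk maps to the same pgdat->lru_lock, which is the "may cost a bit more locking" the changelog concedes: one lock/unlock pair per level instead of one for the whole walk.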
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1c3df77972e8..f69601d39a6d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1971,19 +1971,17 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, &stat, false);
 
 	spin_lock_irq(&pgdat->lru_lock);
-
 	move_pages_to_lru(lruvec, &page_list);
 
 	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
-	lru_note_cost(lruvec, file, stat.nr_pageout);
 	item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
 	if (!cgroup_reclaim(sc))
 		__count_vm_events(item, nr_reclaimed);
 	__count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
 	__count_vm_events(PGSTEAL_ANON + file, nr_reclaimed);
-
 	spin_unlock_irq(&pgdat->lru_lock);
 
+	lru_note_cost(lruvec, file, stat.nr_pageout);
 	mem_cgroup_uncharge_list(&page_list);
 	free_unref_page_list(&page_list);
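The reordering in this hunk is load-bearing: lru_note_cost() now takes pgdat->lru_lock itself, and kernel spinlocks are not recursive, so calling it while shrink_inactive_list() still holds the lock would self-deadlock. A minimal illustration of the hazard the move avoids (not actual kernel code):

	spin_lock_irq(&pgdat->lru_lock);
	/* lru_note_cost(lruvec, file, stat.nr_pageout);
	 *   ^ would spin forever on the lock this CPU already
	 *     holds -- instant self-deadlock */
	spin_unlock_irq(&pgdat->lru_lock);

	/* safe: the lock is free when lru_note_cost() re-takes it */
	lru_note_cost(lruvec, file, stat.nr_pageout);

Deferring the cost accounting until after the unlock is harmless here, since stat.nr_pageout was already filled in by shrink_page_list() above.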
diff --git a/mm/workingset.c b/mm/workingset.c
index 25f75bbe80e0..94b512538d5a 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -381,9 +381,7 @@ void workingset_refault(struct page *page, void *shadow)
 	if (workingset) {
 		SetPageWorkingset(page);
 		/* XXX: Move to lru_cache_add() when it supports new vs putback */
-		spin_lock_irq(&page_pgdat(page)->lru_lock);
 		lru_note_cost_page(page);
-		spin_unlock_irq(&page_pgdat(page)->lru_lock);
 		inc_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file);
 	}
 out:
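The same rule drives this hunk: lru_note_cost_page() must no longer be wrapped in lru_lock by its caller. For context, the wrapper at this point in the series is roughly the following (reproduced from the mm/swap.c of this era, so treat the exact shape as approximate); it resolves the page to its lruvec and delegates, leaving all locking to lru_note_cost() itself:

	void lru_note_cost_page(struct page *page)
	{
		lru_note_cost(mem_cgroup_page_lruvec(page, page_pgdat(page)),
			      page_is_file_lru(page), thp_nr_pages(page));
	}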