author    Muchun Song <[email protected]>    2021-02-24 12:03:15 -0800
committer Linus Torvalds <[email protected]>    2021-02-24 13:38:29 -0800
commit    f3344adf38bdb3107d40483dd9501215ad40edce
tree      afd77361c5a91919d9c953a3d6fc906b7da6ee54
parent    2e9bd483159939ed2c0704b914294653c8341d25
mm: memcontrol: optimize per-lruvec stats counter memory usage
The vmstat threshold is 32 (MEMCG_CHARGE_BATCH); in fact, the threshold can be as big as MEMCG_CHARGE_BATCH * PAGE_SIZE, which still fits into an s32. So introduce struct batched_lruvec_stat to optimize memory usage.

The size of struct lruvec_stat is 304 bytes on 64-bit systems, and it is a per-cpu structure, so with this patch we can save 304 / 2 * ncpu bytes per memcg per node, where ncpu is the number of possible CPUs. If there are c memory cgroups (including dying cgroups) and n NUMA nodes in the system, we can save (152 * ncpu * c * n) bytes in total.

[[email protected]: fix typo in comment]

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Muchun Song <[email protected]>
Reviewed-by: Shakeel Butt <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Vladimir Davydov <[email protected]>
Cc: Shakeel Butt <[email protected]>
Cc: Roman Gushchin <[email protected]>
Cc: Stephen Rothwell <[email protected]>
Cc: Chris Down <[email protected]>
Cc: Yafang Shao <[email protected]>
Cc: Wei Yang <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
-rw-r--r--  include/linux/memcontrol.h | 14
-rw-r--r--  mm/memcontrol.c            | 10
2 files changed, 21 insertions(+), 3 deletions(-)
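
To make the savings arithmetic concrete, here is a minimal userspace sketch. NR_VM_NODE_STAT_ITEMS is hard-coded to 38 to match the 304-byte figure in the commit message (38 * sizeof(long) = 304 on 64-bit); the ncpu, c, and n values are illustrative, not taken from the patch:

#include <stdio.h>

/* Stand-in for the kernel definition; assumed value, see above. */
#define NR_VM_NODE_STAT_ITEMS 38

struct lruvec_stat {
	long count[NR_VM_NODE_STAT_ITEMS];	/* 8 bytes each: 304 bytes */
};

struct batched_lruvec_stat {
	int count[NR_VM_NODE_STAT_ITEMS];	/* s32, 4 bytes each: 152 bytes */
};

int main(void)
{
	unsigned long ncpu = 64, c = 1000, n = 2;	/* example system */
	unsigned long saved = sizeof(struct lruvec_stat) -
			      sizeof(struct batched_lruvec_stat);

	printf("saved per cpu, per memcg, per node: %lu bytes\n", saved);
	/* 152 * 64 * 1000 * 2 = 19456000 bytes, roughly 18.6 MiB */
	printf("saved in total: %lu bytes\n", saved * ncpu * c * n);
	return 0;
}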
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 7a4dd1cb19fe..41bbf71edd9f 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -92,6 +92,10 @@ struct lruvec_stat {
long count[NR_VM_NODE_STAT_ITEMS];
};
+struct batched_lruvec_stat {
+ s32 count[NR_VM_NODE_STAT_ITEMS];
+};
+
/*
* Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
* which have elements charged to this memcg.
@@ -107,11 +111,17 @@ struct memcg_shrinker_map {
struct mem_cgroup_per_node {
struct lruvec lruvec;
- /* Legacy local VM stats */
+ /*
+ * Legacy local VM stats. This should be struct lruvec_stat and
+ * cannot be optimized to struct batched_lruvec_stat: the
+ * threshold of the lruvec_stat_cpu can be as big as
+ * MEMCG_CHARGE_BATCH * PAGE_SIZE, which still fits into an s32,
+ * but this field has no upper limit.
+ */
struct lruvec_stat __percpu *lruvec_stat_local;
/* Subtree VM stats (batched updates) */
- struct lruvec_stat __percpu *lruvec_stat_cpu;
+ struct batched_lruvec_stat __percpu *lruvec_stat_cpu;
atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS];
unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
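
The comment above is the heart of the patch: an s32 is enough for lruvec_stat_cpu because the batched counter is flushed into the shared atomic counters once its magnitude crosses the (possibly page-scaled) threshold, so the residue it holds stays bounded, whereas lruvec_stat_local accumulates without bound and must remain long. A simplified userspace model of that update path, loosely following __mod_memcg_lruvec_state(); the single-counter layout and names here are illustrative only:

#include <stdlib.h>	/* labs() */

#define MEMCG_CHARGE_BATCH	32
#define PAGE_SHIFT		12	/* assume 4 KiB pages */

static int batched;	/* models the per-cpu s32 counter */
static long node_stat;	/* models the atomic_long_t node counter */

static void mod_lruvec_state(int val, int item_in_bytes)
{
	long threshold = MEMCG_CHARGE_BATCH;
	long x = val + batched;

	/* Byte-counted items (e.g. slab bytes) scale the threshold. */
	if (item_in_bytes)
		threshold <<= PAGE_SHIFT;	/* 32 << 12 = 131072 */

	if (labs(x) > threshold) {
		node_stat += x;	/* flush to the shared counter */
		x = 0;
	}
	/* here |x| <= MEMCG_CHARGE_BATCH << PAGE_SHIFT, far below S32_MAX */
	batched = (int)x;
}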
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 60ce452e42e6..b259e7d8ce41 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5208,7 +5208,7 @@ static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
return 1;
}
- pn->lruvec_stat_cpu = alloc_percpu_gfp(struct lruvec_stat,
+ pn->lruvec_stat_cpu = alloc_percpu_gfp(struct batched_lruvec_stat,
GFP_KERNEL_ACCOUNT);
if (!pn->lruvec_stat_cpu) {
free_percpu(pn->lruvec_stat_local);
@@ -7093,6 +7093,14 @@ static int __init mem_cgroup_init(void)
{
int cpu, node;
+ /*
+ * Currently an s32 type (see struct batched_lruvec_stat) is used
+ * for per-memcg-per-cpu caching of per-node statistics. For this
+ * to work, the overfill threshold must not exceed
+ * S32_MAX / PAGE_SIZE.
+ */
+ BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
+
cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
memcg_hotplug_cpu_dead);
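
For a sense of the margin the BUILD_BUG_ON enforces: with 4 KiB pages the worst-case per-cpu residue is MEMCG_CHARGE_BATCH * PAGE_SIZE = 32 * 4096 = 131072, while S32_MAX / PAGE_SIZE = 524287, so the assertion holds comfortably. A userspace analogue using C11 static_assert, with INT_MAX standing in for S32_MAX and a 4 KiB page size assumed:

#include <assert.h>	/* static_assert (C11) */
#include <limits.h>	/* INT_MAX equals S32_MAX on the usual LP64 ABIs */

#define MEMCG_CHARGE_BATCH	32
#define PAGE_SIZE		4096	/* assumed */

/* Same condition as the kernel's BUILD_BUG_ON, inverted for assert:
 * the worst-case batched residue must fit in a signed 32-bit int. */
static_assert(MEMCG_CHARGE_BATCH <= INT_MAX / PAGE_SIZE,
	      "batched lruvec stat residue would overflow s32");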