diff options
Diffstat (limited to 'mm/memcontrol.c')
| -rw-r--r-- | mm/memcontrol.c | 54 | 
1 file changed, 35 insertions, 19 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 4a7b3ebf8e48..36e9f38c919d 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -84,7 +84,7 @@ EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);  static bool cgroup_memory_nosocket __ro_after_init;  /* Kernel memory accounting disabled? */ -bool cgroup_memory_nokmem __ro_after_init; +static bool cgroup_memory_nokmem __ro_after_init;  /* Whether the swap controller is active */  #ifdef CONFIG_MEMCG_SWAP @@ -254,7 +254,7 @@ struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)  }  #ifdef CONFIG_MEMCG_KMEM -extern spinlock_t css_set_lock; +static DEFINE_SPINLOCK(objcg_lock);  bool mem_cgroup_kmem_disabled(void)  { @@ -298,9 +298,9 @@ static void obj_cgroup_release(struct percpu_ref *ref)  	if (nr_pages)  		obj_cgroup_uncharge_pages(objcg, nr_pages); -	spin_lock_irqsave(&css_set_lock, flags); +	spin_lock_irqsave(&objcg_lock, flags);  	list_del(&objcg->list); -	spin_unlock_irqrestore(&css_set_lock, flags); +	spin_unlock_irqrestore(&objcg_lock, flags);  	percpu_ref_exit(ref);  	kfree_rcu(objcg, rcu); @@ -332,7 +332,7 @@ static void memcg_reparent_objcgs(struct mem_cgroup *memcg,  	objcg = rcu_replace_pointer(memcg->objcg, NULL, true); -	spin_lock_irq(&css_set_lock); +	spin_lock_irq(&objcg_lock);  	/* 1) Ready to reparent active objcg. 
*/  	list_add(&objcg->list, &memcg->objcg_list); @@ -342,7 +342,7 @@ static void memcg_reparent_objcgs(struct mem_cgroup *memcg,  	/* 3) Move already reparented objcgs to the parent's list */  	list_splice(&memcg->objcg_list, &parent->objcg_list); -	spin_unlock_irq(&css_set_lock); +	spin_unlock_irq(&objcg_lock);  	percpu_ref_kill(&objcg->refcnt);  } @@ -629,11 +629,17 @@ static DEFINE_SPINLOCK(stats_flush_lock);  static DEFINE_PER_CPU(unsigned int, stats_updates);  static atomic_t stats_flush_threshold = ATOMIC_INIT(0); -static inline void memcg_rstat_updated(struct mem_cgroup *memcg) +static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)  { +	unsigned int x; +  	cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id()); -	if (!(__this_cpu_inc_return(stats_updates) % MEMCG_CHARGE_BATCH)) -		atomic_inc(&stats_flush_threshold); + +	x = __this_cpu_add_return(stats_updates, abs(val)); +	if (x > MEMCG_CHARGE_BATCH) { +		atomic_add(x / MEMCG_CHARGE_BATCH, &stats_flush_threshold); +		__this_cpu_write(stats_updates, 0); +	}  }  static void __mem_cgroup_flush_stats(void) @@ -656,7 +662,7 @@ void mem_cgroup_flush_stats(void)  static void flush_memcg_stats_dwork(struct work_struct *w)  { -	mem_cgroup_flush_stats(); +	__mem_cgroup_flush_stats();  	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 2UL*HZ);  } @@ -672,7 +678,7 @@ void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)  		return;  	__this_cpu_add(memcg->vmstats_percpu->state[idx], val); -	memcg_rstat_updated(memcg); +	memcg_rstat_updated(memcg, val);  }  /* idx can be of type enum memcg_stat_item or node_stat_item. 
*/ @@ -705,7 +711,7 @@ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,  	/* Update lruvec */  	__this_cpu_add(pn->lruvec_stats_percpu->state[idx], val); -	memcg_rstat_updated(memcg); +	memcg_rstat_updated(memcg, val);  }  /** @@ -789,7 +795,7 @@ void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,  		return;  	__this_cpu_add(memcg->vmstats_percpu->events[idx], count); -	memcg_rstat_updated(memcg); +	memcg_rstat_updated(memcg, count);  }  static unsigned long memcg_events(struct mem_cgroup *memcg, int event) @@ -1369,6 +1375,7 @@ static const struct memory_stat memory_stats[] = {  	{ "pagetables",			NR_PAGETABLE			},  	{ "percpu",			MEMCG_PERCPU_B			},  	{ "sock",			MEMCG_SOCK			}, +	{ "vmalloc",			MEMCG_VMALLOC			},  	{ "shmem",			NR_SHMEM			},  	{ "file_mapped",		NR_FILE_MAPPED			},  	{ "file_dirty",			NR_FILE_DIRTY			}, @@ -4850,6 +4857,17 @@ out_kfree:  	return ret;  } +#if defined(CONFIG_MEMCG_KMEM) && (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)) +static int mem_cgroup_slab_show(struct seq_file *m, void *p) +{ +	/* +	 * Deprecated. +	 * Please, take a look at tools/cgroup/slabinfo.py . 
+	 */ +	return 0; +} +#endif +  static struct cftype mem_cgroup_legacy_files[] = {  	{  		.name = "usage_in_bytes", @@ -4950,7 +4968,7 @@ static struct cftype mem_cgroup_legacy_files[] = {  	(defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))  	{  		.name = "kmem.slabinfo", -		.seq_show = memcg_slab_show, +		.seq_show = mem_cgroup_slab_show,  	},  #endif  	{ @@ -5110,15 +5128,11 @@ static void mem_cgroup_free(struct mem_cgroup *memcg)  static struct mem_cgroup *mem_cgroup_alloc(void)  {  	struct mem_cgroup *memcg; -	unsigned int size;  	int node;  	int __maybe_unused i;  	long error = -ENOMEM; -	size = sizeof(struct mem_cgroup); -	size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); - -	memcg = kzalloc(size, GFP_KERNEL); +	memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);  	if (!memcg)  		return ERR_PTR(error); @@ -6312,6 +6326,8 @@ static void __memory_events_show(struct seq_file *m, atomic_long_t *events)  	seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));  	seq_printf(m, "oom_kill %lu\n",  		   atomic_long_read(&events[MEMCG_OOM_KILL])); +	seq_printf(m, "oom_group_kill %lu\n", +		   atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));  }  static int memory_events_show(struct seq_file *m, void *v)  |