path: root/include/linux/memcontrol.h
Diffstat (limited to 'include/linux/memcontrol.h')
-rw-r--r--	include/linux/memcontrol.h	50
1 file changed, 34 insertions, 16 deletions
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index edf9e8f32d70..44c41462be33 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -117,9 +117,12 @@ struct memcg_shrinker_map {
struct mem_cgroup_per_node {
struct lruvec lruvec;
+ /* Legacy local VM stats */
+ struct lruvec_stat __percpu *lruvec_stat_local;
+
+ /* Subtree VM stats (batched updates) */
struct lruvec_stat __percpu *lruvec_stat_cpu;
atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS];
- atomic_long_t lruvec_stat_local[NR_VM_NODE_STAT_ITEMS];
unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
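Note: the per-node change above drops the pre-aggregated atomic_long_t lruvec_stat_local[] array and replaces it with a per-CPU lruvec_stat structure, mirroring the batched lruvec_stat_cpu counters that already existed. The new per-CPU copy has to be allocated and freed next to the batched one; the sketch below shows the general shape of that lifecycle. It follows the pattern of alloc_mem_cgroup_per_node_info() in mm/memcontrol.c, but the function name and error handling here are illustrative, not part of this patch.

/*
 * Illustrative sketch (not part of this patch): allocating the new exact
 * local counters alongside the existing batched ones, in the style of
 * alloc_mem_cgroup_per_node_info() in mm/memcontrol.c.
 */
static int example_alloc_pn_stats(struct mem_cgroup_per_node *pn)
{
	/* exact, never-batched counters read by lruvec_page_state_local() */
	pn->lruvec_stat_local = alloc_percpu(struct lruvec_stat);
	if (!pn->lruvec_stat_local)
		return -ENOMEM;

	/* batched counters that flush into pn->lruvec_stat[] */
	pn->lruvec_stat_cpu = alloc_percpu(struct lruvec_stat);
	if (!pn->lruvec_stat_cpu) {
		free_percpu(pn->lruvec_stat_local);
		return -ENOMEM;
	}
	return 0;
}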
@@ -230,8 +233,9 @@ struct mem_cgroup {
/* OOM-Killer disable */
int oom_kill_disable;
- /* memory.events */
+ /* memory.events and memory.events.local */
struct cgroup_file events_file;
+ struct cgroup_file events_local_file;
/* handle for "memory.swap.events" */
struct cgroup_file swap_events_file;
@@ -265,18 +269,20 @@ struct mem_cgroup {
atomic_t moving_account;
struct task_struct *move_lock_task;
- /* memory.stat */
+ /* Legacy local VM stats and events */
+ struct memcg_vmstats_percpu __percpu *vmstats_local;
+
+ /* Subtree VM stats and events (batched updates) */
struct memcg_vmstats_percpu __percpu *vmstats_percpu;
MEMCG_PADDING(_pad2_);
atomic_long_t vmstats[MEMCG_NR_STAT];
- atomic_long_t vmstats_local[MEMCG_NR_STAT];
-
atomic_long_t vmevents[NR_VM_EVENT_ITEMS];
- atomic_long_t vmevents_local[NR_VM_EVENT_ITEMS];
+ /* memory.events */
atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
+ atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];
unsigned long socket_pressure;
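Note: at the memcg level the same split appears. The new vmstats_local per-CPU structure keeps exact, cgroup-local counts for the local stat queries, while vmstats_percpu continues to batch updates into the subtree-wide vmstats[]/vmevents[] atomics. As a rough model of the writer side, an update bumps the local counter unconditionally and only flushes the batched counter into the shared atomic once it exceeds the charge batch. The sketch is patterned on __mod_memcg_state() in mm/memcontrol.c but deliberately simplified (it omits the mem_cgroup_disabled() check and any hierarchy walk), so treat it as an illustration rather than the exact implementation.

/*
 * Simplified model of a stats writer under the new layout; compare
 * __mod_memcg_state() in mm/memcontrol.c for the real thing.
 */
static void example_mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
{
	long x;

	/* exact local count, summed up by memcg_page_state_local() */
	__this_cpu_add(memcg->vmstats_local->stat[idx], val);

	/* batched subtree count: hit the shared atomic only once per batch */
	x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
		atomic_long_add(x, &memcg->vmstats[idx]);
		x = 0;
	}
	__this_cpu_write(memcg->vmstats_percpu->stat[idx], x);
}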
@@ -388,7 +394,6 @@ out:
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);
-bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
@@ -567,7 +572,11 @@ static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
int idx)
{
- long x = atomic_long_read(&memcg->vmstats_local[idx]);
+ long x = 0;
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ x += per_cpu(memcg->vmstats_local->stat[idx], cpu);
#ifdef CONFIG_SMP
if (x < 0)
x = 0;
@@ -641,13 +650,15 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
enum node_stat_item idx)
{
struct mem_cgroup_per_node *pn;
- long x;
+ long x = 0;
+ int cpu;
if (mem_cgroup_disabled())
return node_page_state(lruvec_pgdat(lruvec), idx);
pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- x = atomic_long_read(&pn->lruvec_stat_local[idx]);
+ for_each_possible_cpu(cpu)
+ x += per_cpu(pn->lruvec_stat_local->count[idx], cpu);
#ifdef CONFIG_SMP
if (x < 0)
x = 0;
@@ -737,6 +748,9 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
static inline void memcg_memory_event(struct mem_cgroup *memcg,
enum memcg_memory_event event)
{
+ atomic_long_inc(&memcg->memory_events_local[event]);
+ cgroup_file_notify(&memcg->events_local_file);
+
do {
atomic_long_inc(&memcg->memory_events[event]);
cgroup_file_notify(&memcg->events_file);
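Note: with this hunk, memcg_memory_event() records each event twice: once in the new memory_events_local[] array of the cgroup where it happened (backing memory.events.local via events_local_file), and once per ancestor through the existing do/while walk backing memory.events (the loop's continuation lies outside this hunk). Call sites are unchanged; they simply report the event where it occurs, e.g.:

/*
 * Hypothetical call site, for illustration only: MEMCG_OOM is an
 * existing memcg_memory_event value; this wrapper is not part of
 * the patch.
 */
static void example_report_oom(struct mem_cgroup *memcg)
{
	memcg_memory_event(memcg, MEMCG_OOM);
}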
@@ -860,12 +874,6 @@ static inline bool mm_match_cgroup(struct mm_struct *mm,
return true;
}
-static inline bool task_in_mem_cgroup(struct task_struct *task,
- const struct mem_cgroup *memcg)
-{
- return true;
-}
-
static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
return NULL;
@@ -1263,6 +1271,8 @@ int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge(struct page *page, int order);
int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
struct mem_cgroup *memcg);
+void __memcg_kmem_uncharge_memcg(struct mem_cgroup *memcg,
+ unsigned int nr_pages);
extern struct static_key_false memcg_kmem_enabled_key;
extern struct workqueue_struct *memcg_kmem_cache_wq;
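Note: the new __memcg_kmem_uncharge_memcg() allows uncharging kmem pages against a memcg directly, given only the page count, rather than going through the page's own charge; the memcg_kmem_uncharge_memcg() inline added in the next hunk wraps it behind memcg_kmem_enabled(), matching the existing charge-side wrappers. A hypothetical caller that charges a page against a known memcg could pair the two like this (everything except the two kmem helpers is illustrative):

/*
 * Illustrative only: charge an order-0 page to a known memcg and
 * later drop that charge via the new uncharge-by-memcg helper.
 */
static void example_kmem_charge_cycle(struct page *page,
				      struct mem_cgroup *memcg)
{
	if (memcg_kmem_charge_memcg(page, GFP_KERNEL, 0, memcg))
		return;			/* charge failed, nothing to undo */

	/* ... page is in use, accounted to @memcg ... */

	memcg_kmem_uncharge_memcg(page, 0, memcg);
}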
@@ -1304,6 +1314,14 @@ static inline int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp,
return __memcg_kmem_charge_memcg(page, gfp, order, memcg);
return 0;
}
+
+static inline void memcg_kmem_uncharge_memcg(struct page *page, int order,
+ struct mem_cgroup *memcg)
+{
+ if (memcg_kmem_enabled())
+ __memcg_kmem_uncharge_memcg(memcg, 1 << order);
+}
+
/*
* helper for accessing a memcg's index. It will be used as an index in the
* child cache array in kmem_cache, and also to derive its name. This function