Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--	mm/memcontrol.c	112
1 file changed, 57 insertions, 55 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 556859fec4ef..5585dc3d3646 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -776,7 +776,8 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
 	/* threshold event is triggered in finer grain than soft limit */
 	if (unlikely(mem_cgroup_event_ratelimit(memcg,
 						MEM_CGROUP_TARGET_THRESH))) {
-		bool do_softlimit, do_numainfo;
+		bool do_softlimit;
+		bool do_numainfo __maybe_unused;
 
 		do_softlimit = mem_cgroup_event_ratelimit(memcg,
 						MEM_CGROUP_TARGET_SOFTLIMIT);
@@ -1041,6 +1042,19 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
 
 	pc = lookup_page_cgroup(page);
 	memcg = pc->mem_cgroup;
+
+	/*
+	 * Surreptitiously switch any uncharged page to root:
+	 * an uncharged page off lru does nothing to secure
+	 * its former mem_cgroup from sudden removal.
+	 *
+	 * Our caller holds lru_lock, and PageCgroupUsed is updated
+	 * under page_cgroup lock: between them, they make all uses
+	 * of pc->mem_cgroup safe.
+	 */
+	if (!PageCgroupUsed(pc) && memcg != root_mem_cgroup)
+		pc->mem_cgroup = memcg = root_mem_cgroup;
+
 	mz = page_cgroup_zoneinfo(memcg, page);
 	/* compound_order() is stabilized through lru_lock */
 	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
@@ -2407,8 +2421,12 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 				       struct page *page,
 				       unsigned int nr_pages,
 				       struct page_cgroup *pc,
-				       enum charge_type ctype)
+				       enum charge_type ctype,
+				       bool lrucare)
 {
+	struct zone *uninitialized_var(zone);
+	bool was_on_lru = false;
+
 	lock_page_cgroup(pc);
 	if (unlikely(PageCgroupUsed(pc))) {
 		unlock_page_cgroup(pc);
@@ -2419,6 +2437,21 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 	 * we don't need page_cgroup_lock about tail pages, becase they are not
 	 * accessed by any other context at this point.
 	 */
+
+	/*
+	 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
+	 * may already be on some other mem_cgroup's LRU.  Take care of it.
+	 */
+	if (lrucare) {
+		zone = page_zone(page);
+		spin_lock_irq(&zone->lru_lock);
+		if (PageLRU(page)) {
+			ClearPageLRU(page);
+			del_page_from_lru_list(zone, page, page_lru(page));
+			was_on_lru = true;
+		}
+	}
+
 	pc->mem_cgroup = memcg;
 	/*
 	 * We access a page_cgroup asynchronously without lock_page_cgroup().
@@ -2442,9 +2475,18 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 		break;
 	}
 
+	if (lrucare) {
+		if (was_on_lru) {
+			VM_BUG_ON(PageLRU(page));
+			SetPageLRU(page);
+			add_page_to_lru_list(zone, page, page_lru(page));
+		}
+		spin_unlock_irq(&zone->lru_lock);
+	}
+
 	mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), nr_pages);
 	unlock_page_cgroup(pc);
-	WARN_ON_ONCE(PageLRU(page));
+
 	/*
 	 * "charge_statistics" updated event counter. Then, check it.
 	 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
@@ -2642,7 +2684,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 	ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
 	if (ret == -ENOMEM)
 		return ret;
-	__mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype);
+	__mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype, false);
 	return 0;
 }
 
@@ -2662,35 +2704,6 @@ static void
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
 					enum charge_type ctype);
 
-static void
-__mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *memcg,
-					enum charge_type ctype)
-{
-	struct page_cgroup *pc = lookup_page_cgroup(page);
-	struct zone *zone = page_zone(page);
-	unsigned long flags;
-	bool removed = false;
-
-	/*
-	 * In some case, SwapCache, FUSE(splice_buf->radixtree), the page
-	 * is already on LRU. It means the page may on some other page_cgroup's
-	 * LRU. Take care of it.
-	 */
-	spin_lock_irqsave(&zone->lru_lock, flags);
-	if (PageLRU(page)) {
-		del_page_from_lru_list(zone, page, page_lru(page));
-		ClearPageLRU(page);
-		removed = true;
-	}
-	__mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
-	if (removed) {
-		add_page_to_lru_list(zone, page, page_lru(page));
-		SetPageLRU(page);
-	}
-	spin_unlock_irqrestore(&zone->lru_lock, flags);
-	return;
-}
-
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask)
 {
@@ -2768,13 +2781,16 @@ static void
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
 					enum charge_type ctype)
 {
+	struct page_cgroup *pc;
+
 	if (mem_cgroup_disabled())
 		return;
 	if (!memcg)
 		return;
 	cgroup_exclude_rmdir(&memcg->css);
 
-	__mem_cgroup_commit_charge_lrucare(page, memcg, ctype);
+	pc = lookup_page_cgroup(page);
+	__mem_cgroup_commit_charge(memcg, page, 1, pc, ctype, true);
 	/*
 	 * Now swap is on-memory. This means this page may be
 	 * counted both as mem and swap....double count.
@@ -3026,23 +3042,6 @@ void mem_cgroup_uncharge_end(void)
 	batch->memcg = NULL;
 }
 
-/*
- * A function for resetting pc->mem_cgroup for newly allocated pages.
- * This function should be called if the newpage will be added to LRU
- * before start accounting.
- */
-void mem_cgroup_reset_owner(struct page *newpage)
-{
-	struct page_cgroup *pc;
-
-	if (mem_cgroup_disabled())
-		return;
-
-	pc = lookup_page_cgroup(newpage);
-	VM_BUG_ON(PageCgroupUsed(pc));
-	pc->mem_cgroup = root_mem_cgroup;
-}
-
 #ifdef CONFIG_SWAP
 /*
  * called after __delete_from_swap_cache() and drop "page" account.
@@ -3247,7 +3246,7 @@ int mem_cgroup_prepare_migration(struct page *page,
 		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
 	else
 		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype);
+	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype, false);
 	return ret;
 }
 
@@ -3331,7 +3330,7 @@ void mem_cgroup_replace_page_cache(struct page *oldpage,
 	 * the newpage may be on LRU(or pagevec for LRU) already. We lock
 	 * LRU while we overwrite pc->mem_cgroup.
 	 */
-	__mem_cgroup_commit_charge_lrucare(newpage, memcg, type);
+	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, type, true);
 }
 
 #ifdef CONFIG_DEBUG_VM
@@ -4413,6 +4412,9 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
 	 */
 	BUG_ON(!thresholds);
 
+	if (!thresholds->primary)
+		goto unlock;
+
 	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
 
 	/* Check if a threshold crossed before removing */
@@ -4461,7 +4463,7 @@ swap_buffers:
 
 	/* To be sure that nobody uses thresholds */
 	synchronize_rcu();
-
+unlock:
 	mutex_unlock(&memcg->thresholds_lock);
 }
 
@@ -5073,7 +5075,7 @@ static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
 		return NULL;
 	if (PageAnon(page)) {
 		/* we don't move shared anon */
-		if (!move_anon() || page_mapcount(page) > 2)
+		if (!move_anon() || page_mapcount(page) > 1)
 			return NULL;
 	} else if (!move_file())
 		/* we ignore mapcount for file pages */
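
For reference, the lrucare handling moved into __mem_cgroup_commit_charge() above amounts to: isolate the page from whatever LRU it currently sits on under zone->lru_lock, switch pc->mem_cgroup while the page is off the LRU, and put it back before dropping the lock, all nested inside lock_page_cgroup(). The sketch below condenses those hunks into one function. It is illustrative only: the helper calls are the ones visible in the diff, but the function name (commit_charge_lrucare_sketch) is hypothetical, charge-type and statistics handling is omitted, and it assumes the kernel-internal headers of this era rather than being a drop-in replacement for the real __mem_cgroup_commit_charge().

/*
 * Illustrative condensation of the lrucare path added above.
 * Simplified: no charge_type switch, no statistics, no event check.
 */
static void commit_charge_lrucare_sketch(struct mem_cgroup *memcg,
					 struct page *page,
					 struct page_cgroup *pc)
{
	struct zone *zone = page_zone(page);
	bool was_on_lru = false;

	lock_page_cgroup(pc);		/* pc->mem_cgroup only changes under this */

	/* The page may already be on some other memcg's LRU: isolate it. */
	spin_lock_irq(&zone->lru_lock);
	if (PageLRU(page)) {
		ClearPageLRU(page);
		del_page_from_lru_list(zone, page, page_lru(page));
		was_on_lru = true;
	}

	pc->mem_cgroup = memcg;		/* safe: the page is off every LRU here */
	smp_wmb();			/* commit ownership before marking it used */
	SetPageCgroupUsed(pc);

	/* Re-add the page, now accounted to the new memcg, before unlocking. */
	if (was_on_lru) {
		VM_BUG_ON(PageLRU(page));
		SetPageLRU(page);
		add_page_to_lru_list(zone, page, page_lru(page));
	}
	spin_unlock_irq(&zone->lru_lock);

	unlock_page_cgroup(pc);
}

Note the lock ordering this establishes: zone->lru_lock is taken inside lock_page_cgroup(), the inverse of the nesting used by the removed __mem_cgroup_commit_charge_lrucare() wrapper, which took lru_lock first and only then entered the commit path.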