Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c | 129
1 file changed, 47 insertions, 82 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 0b0550ca85b4..87b29e76cafd 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -227,13 +227,14 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
 	INIT_LIST_HEAD(&parent->slabs_full);
 	INIT_LIST_HEAD(&parent->slabs_partial);
 	INIT_LIST_HEAD(&parent->slabs_free);
+	parent->total_slabs = 0;
+	parent->free_slabs = 0;
 	parent->shared = NULL;
 	parent->alien = NULL;
 	parent->colour_next = 0;
 	spin_lock_init(&parent->list_lock);
 	parent->free_objects = 0;
 	parent->free_touched = 0;
-	parent->num_slabs = 0;
 }
 
 #define MAKE_LIST(cachep, listp, slab, nodeid)				\
@@ -1366,7 +1367,6 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
 {
 #if DEBUG
 	struct kmem_cache_node *n;
-	struct page *page;
 	unsigned long flags;
 	int node;
 	static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
@@ -1381,32 +1381,18 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
 		cachep->name, cachep->size, cachep->gfporder);
 
 	for_each_kmem_cache_node(cachep, node, n) {
-		unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
-		unsigned long active_slabs = 0, num_slabs = 0;
-		unsigned long num_slabs_partial = 0, num_slabs_free = 0;
-		unsigned long num_slabs_full;
+		unsigned long total_slabs, free_slabs, free_objs;
 
 		spin_lock_irqsave(&n->list_lock, flags);
-		num_slabs = n->num_slabs;
-		list_for_each_entry(page, &n->slabs_partial, lru) {
-			active_objs += page->active;
-			num_slabs_partial++;
-		}
-		list_for_each_entry(page, &n->slabs_free, lru)
-			num_slabs_free++;
-
-		free_objects += n->free_objects;
+		total_slabs = n->total_slabs;
+		free_slabs = n->free_slabs;
+		free_objs = n->free_objects;
 		spin_unlock_irqrestore(&n->list_lock, flags);
 
-		num_objs = num_slabs * cachep->num;
-		active_slabs = num_slabs - num_slabs_free;
-		num_slabs_full = num_slabs -
-			(num_slabs_partial + num_slabs_free);
-		active_objs += (num_slabs_full * cachep->num);
-
-		pr_warn("  node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
-			node, active_slabs, num_slabs, active_objs, num_objs,
-			free_objects);
+		pr_warn("  node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
+			node, total_slabs - free_slabs, total_slabs,
+			(total_slabs * cachep->num) - free_objs,
+			total_slabs * cachep->num);
 	}
 #endif
 }
@@ -2318,7 +2304,8 @@ static int drain_freelist(struct kmem_cache *cache,
 
 		page = list_entry(p, struct page, lru);
 		list_del(&page->lru);
-		n->num_slabs--;
+		n->free_slabs--;
+		n->total_slabs--;
 		/*
 		 * Safe to drop the lock. The slab is no longer linked
 		 * to the cache.
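The replacement slab_out_of_memory() report above is pure arithmetic over three per-node counters; nothing walks the slab lists under the lock anymore. A minimal userspace sketch of that derivation (hypothetical node_snapshot/report_node names, not part of the patch; objs_per_slab stands in for cachep->num):

#include <stdio.h>

/* Hypothetical snapshot of the per-node counters read under list_lock. */
struct node_snapshot {
	unsigned long total_slabs;	/* slabs on full + partial + free lists */
	unsigned long free_slabs;	/* slabs on the free list only */
	unsigned long free_objs;	/* free object slots across all slabs */
};

static void report_node(int node, const struct node_snapshot *s,
			unsigned long objs_per_slab)
{
	unsigned long num_objs = s->total_slabs * objs_per_slab;

	/* Same shape as the new pr_warn(): used/total slabs, used/total objs. */
	printf("  node %d: slabs: %lu/%lu, objs: %lu/%lu\n",
	       node, s->total_slabs - s->free_slabs, s->total_slabs,
	       num_objs - s->free_objs, num_objs);
}

int main(void)
{
	/* Example: 10 slabs of 32 objects, 3 fully free, 70 free slots total. */
	struct node_snapshot s = { .total_slabs = 10, .free_slabs = 3,
				   .free_objs = 70 };
	report_node(0, &s, 32);	/* -> node 0: slabs: 7/10, objs: 250/320 */
	return 0;
}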
@@ -2332,7 +2319,7 @@ out:
 	return nr_freed;
 }
 
-int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
+int __kmem_cache_shrink(struct kmem_cache *cachep)
 {
 	int ret = 0;
 	int node;
@@ -2352,7 +2339,7 @@
 
 int __kmem_cache_shutdown(struct kmem_cache *cachep)
 {
-	return __kmem_cache_shrink(cachep, false);
+	return __kmem_cache_shrink(cachep);
 }
 
 void __kmem_cache_release(struct kmem_cache *cachep)
@@ -2753,12 +2740,13 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
 
 	n = get_node(cachep, page_to_nid(page));
 	spin_lock(&n->list_lock);
-	if (!page->active)
+	n->total_slabs++;
+	if (!page->active) {
 		list_add_tail(&page->lru, &(n->slabs_free));
-	else
+		n->free_slabs++;
+	} else
 		fixup_slab_list(cachep, n, page, &list);
-	n->num_slabs++;
 
 	STATS_INC_GROWN(cachep);
 	n->free_objects += cachep->num - page->active;
 	spin_unlock(&n->list_lock);
@@ -2903,9 +2891,10 @@ static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
 
 	/* Move pfmemalloc slab to the end of list to speed up next search */
 	list_del(&page->lru);
-	if (!page->active)
+	if (!page->active) {
 		list_add_tail(&page->lru, &n->slabs_free);
-	else
+		n->free_slabs++;
+	} else
 		list_add_tail(&page->lru, &n->slabs_partial);
 
 	list_for_each_entry(page, &n->slabs_partial, lru) {
@@ -2913,9 +2902,12 @@ static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
 			return page;
 	}
 
+	n->free_touched = 1;
 	list_for_each_entry(page, &n->slabs_free, lru) {
-		if (!PageSlabPfmemalloc(page))
+		if (!PageSlabPfmemalloc(page)) {
+			n->free_slabs--;
 			return page;
+		}
 	}
 
 	return NULL;
@@ -2925,16 +2917,18 @@ static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
 {
 	struct page *page;
 
-	page = list_first_entry_or_null(&n->slabs_partial,
-			struct page, lru);
+	assert_spin_locked(&n->list_lock);
+	page = list_first_entry_or_null(&n->slabs_partial, struct page, lru);
 	if (!page) {
 		n->free_touched = 1;
-		page = list_first_entry_or_null(&n->slabs_free,
-				struct page, lru);
+		page = list_first_entry_or_null(&n->slabs_free, struct page,
+						lru);
+		if (page)
+			n->free_slabs--;
 	}
 
 	if (sk_memalloc_socks())
-		return get_valid_first_slab(n, page, pfmemalloc);
+		page = get_valid_first_slab(n, page, pfmemalloc);
 
 	return page;
 }
@@ -3434,9 +3428,10 @@ static void free_block(struct kmem_cache *cachep, void **objpp,
 		STATS_DEC_ACTIVE(cachep);
 
 		/* fixup slab chains */
-		if (page->active == 0)
+		if (page->active == 0) {
 			list_add(&page->lru, &n->slabs_free);
-		else {
+			n->free_slabs++;
+		} else {
 			/* Unconditionally move a slab to the end of the
 			 * partial list on free - maximum time for the
 			 * other objects to be freed, too.
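Every hunk above maintains one invariant at the points where a page enters or leaves n->slabs_free: free_slabs counts exactly the pages on that list, and total_slabs the pages on all three lists. A stripped-down model of that bookkeeping (hypothetical struct and helper names, assuming the same transitions the patch instruments):

#include <assert.h>

/* Hypothetical model of the two counters the patch keeps in sync. */
struct node_counters {
	unsigned long total_slabs;	/* pages on full + partial + free */
	unsigned long free_slabs;	/* pages on the free list */
};

/* cache_grow_end(): every new slab raises total_slabs; only a slab with
 * no active objects lands on slabs_free and raises free_slabs too. */
static void grow(struct node_counters *c, int has_active_objs)
{
	c->total_slabs++;
	if (!has_active_objs)
		c->free_slabs++;
}

/* get_first_slab()/get_valid_first_slab(): handing out a page from
 * slabs_free moves it toward the partial list, so free_slabs drops. */
static void take_from_free_list(struct node_counters *c)
{
	assert(c->free_slabs > 0);
	c->free_slabs--;
}

/* free_block(): a slab whose last object was just freed joins slabs_free. */
static void became_empty(struct node_counters *c)
{
	c->free_slabs++;
}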
@@ -3450,7 +3445,8 @@ static void free_block(struct kmem_cache *cachep, void **objpp,
 
 		page = list_last_entry(&n->slabs_free, struct page, lru);
 		list_move(&page->lru, list);
-		n->num_slabs--;
+		n->free_slabs--;
+		n->total_slabs--;
 	}
 }
 
@@ -4102,64 +4098,33 @@ out:
 #ifdef CONFIG_SLABINFO
 void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
 {
-	struct page *page;
-	unsigned long active_objs;
-	unsigned long num_objs;
-	unsigned long active_slabs = 0;
-	unsigned long num_slabs, free_objects = 0, shared_avail = 0;
-	unsigned long num_slabs_partial = 0, num_slabs_free = 0;
-	unsigned long num_slabs_full = 0;
-	const char *name;
-	char *error = NULL;
+	unsigned long active_objs, num_objs, active_slabs;
+	unsigned long total_slabs = 0, free_objs = 0, shared_avail = 0;
+	unsigned long free_slabs = 0;
 	int node;
 	struct kmem_cache_node *n;
 
-	active_objs = 0;
-	num_slabs = 0;
 	for_each_kmem_cache_node(cachep, node, n) {
-
 		check_irq_on();
 		spin_lock_irq(&n->list_lock);
 
-		num_slabs += n->num_slabs;
+		total_slabs += n->total_slabs;
+		free_slabs += n->free_slabs;
+		free_objs += n->free_objects;
 
-		list_for_each_entry(page, &n->slabs_partial, lru) {
-			if (page->active == cachep->num && !error)
-				error = "slabs_partial accounting error";
-			if (!page->active && !error)
-				error = "slabs_partial accounting error";
-			active_objs += page->active;
-			num_slabs_partial++;
-		}
-
-		list_for_each_entry(page, &n->slabs_free, lru) {
-			if (page->active && !error)
-				error = "slabs_free accounting error";
-			num_slabs_free++;
-		}
-
-		free_objects += n->free_objects;
 		if (n->shared)
 			shared_avail += n->shared->avail;
 
 		spin_unlock_irq(&n->list_lock);
 	}
-	num_objs = num_slabs * cachep->num;
-	active_slabs = num_slabs - num_slabs_free;
-	num_slabs_full = num_slabs - (num_slabs_partial + num_slabs_free);
-	active_objs += (num_slabs_full * cachep->num);
-
-	if (num_objs - active_objs != free_objects && !error)
-		error = "free_objects accounting error";
-
-	name = cachep->name;
-	if (error)
-		pr_err("slab: cache %s error: %s\n", name, error);
+	num_objs = total_slabs * cachep->num;
+	active_slabs = total_slabs - free_slabs;
+	active_objs = num_objs - free_objs;
 
 	sinfo->active_objs = active_objs;
 	sinfo->num_objs = num_objs;
 	sinfo->active_slabs = active_slabs;
-	sinfo->num_slabs = num_slabs;
+	sinfo->num_slabs = total_slabs;
 	sinfo->shared_avail = shared_avail;
 	sinfo->limit = cachep->limit;
 	sinfo->batchcount = cachep->batchcount;
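With the counters in place, get_slabinfo() above no longer traverses the partial and free lists on every /proc/slabinfo read; the cost drops from O(slabs) to O(nodes), and the per-page "accounting error" checks go away with the traversal. The derived columns follow in a few lines, sketched here as a hypothetical standalone function whose field names mirror struct slabinfo:

/* Hypothetical mirror of the new get_slabinfo() arithmetic. The inputs
 * are the three sums gathered under each node's list_lock. */
struct totals {
	unsigned long total_slabs;
	unsigned long free_slabs;
	unsigned long free_objs;
};

struct info {
	unsigned long active_objs, num_objs;
	unsigned long active_slabs, num_slabs;
};

static void derive_slabinfo(const struct totals *t,
			    unsigned long objs_per_slab, struct info *out)
{
	out->num_objs = t->total_slabs * objs_per_slab;	/* total capacity */
	out->active_slabs = t->total_slabs - t->free_slabs;
	out->active_objs = out->num_objs - t->free_objs;
	out->num_slabs = t->total_slabs;
}

Note that active_objs is now derived from free_objects rather than summed from each partial slab's page->active; the two agree because free_objects already counts the free slots on partial slabs as well as those on wholly free slabs.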