Diffstat (limited to 'mm/zsmalloc.c')
-rw-r--r--	mm/zsmalloc.c	123
1 file changed, 22 insertions, 101 deletions
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index c937635e0ad1..7d7cb3eaabe0 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -110,13 +110,12 @@
 #define OBJ_TAG_BITS	1
 #define OBJ_TAG_MASK	OBJ_ALLOCATED_TAG
 
-#define OBJ_INDEX_BITS	(BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS)
+#define OBJ_INDEX_BITS	(BITS_PER_LONG - _PFN_BITS)
 #define OBJ_INDEX_MASK	((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
 
 #define HUGE_BITS	1
 #define FULLNESS_BITS	4
 #define CLASS_BITS	8
-#define ISOLATED_BITS	5
 #define MAGIC_VAL_BITS	8
 
 #define MAX(a, b) ((a) >= (b) ? (a) : (b))
@@ -246,7 +245,6 @@ struct zspage {
 		unsigned int huge:HUGE_BITS;
 		unsigned int fullness:FULLNESS_BITS;
 		unsigned int class:CLASS_BITS + 1;
-		unsigned int isolated:ISOLATED_BITS;
 		unsigned int magic:MAGIC_VAL_BITS;
 	};
 	unsigned int inuse;
@@ -278,18 +276,14 @@ static bool ZsHugePage(struct zspage *zspage)
 static void migrate_lock_init(struct zspage *zspage);
 static void migrate_read_lock(struct zspage *zspage);
 static void migrate_read_unlock(struct zspage *zspage);
-
-#ifdef CONFIG_COMPACTION
 static void migrate_write_lock(struct zspage *zspage);
-static void migrate_write_lock_nested(struct zspage *zspage);
 static void migrate_write_unlock(struct zspage *zspage);
+
+#ifdef CONFIG_COMPACTION
 static void kick_deferred_free(struct zs_pool *pool);
 static void init_deferred_free(struct zs_pool *pool);
 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
 #else
-static void migrate_write_lock(struct zspage *zspage) {}
-static void migrate_write_lock_nested(struct zspage *zspage) {}
-static void migrate_write_unlock(struct zspage *zspage) {}
 static void kick_deferred_free(struct zs_pool *pool) {}
 static void init_deferred_free(struct zs_pool *pool) {}
 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
@@ -476,30 +470,12 @@ static inline void set_freeobj(struct zspage *zspage, unsigned int obj)
 	zspage->freeobj = obj;
 }
 
-static void get_zspage_mapping(struct zspage *zspage,
-			       unsigned int *class_idx,
-			       int *fullness)
-{
-	BUG_ON(zspage->magic != ZSPAGE_MAGIC);
-
-	*fullness = zspage->fullness;
-	*class_idx = zspage->class;
-}
-
 static struct size_class *zspage_class(struct zs_pool *pool,
 				       struct zspage *zspage)
 {
 	return pool->size_class[zspage->class];
 }
 
-static void set_zspage_mapping(struct zspage *zspage,
-			       unsigned int class_idx,
-			       int fullness)
-{
-	zspage->class = class_idx;
-	zspage->fullness = fullness;
-}
-
 /*
  * zsmalloc divides the pool into various size classes where each
  * class maintains a list of zspages where each zspage is divided
@@ -694,16 +670,17 @@ static void insert_zspage(struct size_class *class,
 {
 	class_stat_inc(class, fullness, 1);
 	list_add(&zspage->list, &class->fullness_list[fullness]);
+	zspage->fullness = fullness;
 }
 
 /*
  * This function removes the given zspage from the freelist identified
  * by <class, fullness_group>.
  */
-static void remove_zspage(struct size_class *class,
-				struct zspage *zspage,
-				int fullness)
+static void remove_zspage(struct size_class *class, struct zspage *zspage)
 {
+	int fullness = zspage->fullness;
+
 	VM_BUG_ON(list_empty(&class->fullness_list[fullness]));
 
 	list_del_init(&zspage->list);
@@ -721,17 +698,14 @@ static void remove_zspage(struct size_class *class,
  */
 static int fix_fullness_group(struct size_class *class, struct zspage *zspage)
 {
-	int class_idx;
-	int currfg, newfg;
+	int newfg;
 
-	get_zspage_mapping(zspage, &class_idx, &currfg);
 	newfg = get_fullness_group(class, zspage);
-	if (newfg == currfg)
+	if (newfg == zspage->fullness)
 		goto out;
 
-	remove_zspage(class, zspage, currfg);
+	remove_zspage(class, zspage);
 	insert_zspage(class, zspage, newfg);
-	set_zspage_mapping(zspage, class_idx, newfg);
 out:
 	return newfg;
 }
@@ -763,14 +737,12 @@ static struct page *get_next_page(struct page *page)
 static void obj_to_location(unsigned long obj, struct page **page,
 				unsigned int *obj_idx)
 {
-	obj >>= OBJ_TAG_BITS;
 	*page = pfn_to_page(obj >> OBJ_INDEX_BITS);
 	*obj_idx = (obj & OBJ_INDEX_MASK);
 }
 
 static void obj_to_page(unsigned long obj, struct page **page)
 {
-	obj >>= OBJ_TAG_BITS;
 	*page = pfn_to_page(obj >> OBJ_INDEX_BITS);
 }
 
@@ -785,7 +757,6 @@ static unsigned long location_to_obj(struct page *page, unsigned int obj_idx)
 
 	obj = page_to_pfn(page) << OBJ_INDEX_BITS;
 	obj |= obj_idx & OBJ_INDEX_MASK;
-	obj <<= OBJ_TAG_BITS;
 
 	return obj;
 }
@@ -849,15 +820,11 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class,
 				struct zspage *zspage)
 {
 	struct page *page, *next;
-	int fg;
-	unsigned int class_idx;
-
-	get_zspage_mapping(zspage, &class_idx, &fg);
 
 	assert_spin_locked(&pool->lock);
 
 	VM_BUG_ON(get_zspage_inuse(zspage));
-	VM_BUG_ON(fg != ZS_INUSE_RATIO_0);
+	VM_BUG_ON(zspage->fullness != ZS_INUSE_RATIO_0);
 
 	next = page = get_first_page(zspage);
 	do {
@@ -892,7 +859,7 @@ static void free_zspage(struct zs_pool *pool, struct size_class *class,
 		return;
 	}
 
-	remove_zspage(class, zspage, ZS_INUSE_RATIO_0);
+	remove_zspage(class, zspage);
 	__free_zspage(pool, class, zspage);
 }
 
@@ -1011,6 +978,7 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
 	create_page_chain(class, zspage, pages);
 	init_zspage(class, zspage);
 	zspage->pool = pool;
+	zspage->class = class->index;
 
 	return zspage;
 }
@@ -1403,7 +1371,6 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 	obj = obj_malloc(pool, zspage, handle);
 	newfg = get_fullness_group(class, zspage);
 	insert_zspage(class, zspage, newfg);
-	set_zspage_mapping(zspage, class->index, newfg);
 	record_obj(handle, obj);
 	atomic_long_add(class->pages_per_zspage, &pool->pages_allocated);
 	class_stat_inc(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
@@ -1623,7 +1590,7 @@ static struct zspage *isolate_src_zspage(struct size_class *class)
 		zspage = list_first_entry_or_null(&class->fullness_list[fg],
 						  struct zspage, list);
 		if (zspage) {
-			remove_zspage(class, zspage, fg);
+			remove_zspage(class, zspage);
 			return zspage;
 		}
 	}
@@ -1640,7 +1607,7 @@ static struct zspage *isolate_dst_zspage(struct size_class *class)
 		zspage = list_first_entry_or_null(&class->fullness_list[fg],
 						  struct zspage, list);
 		if (zspage) {
-			remove_zspage(class, zspage, fg);
+			remove_zspage(class, zspage);
 			return zspage;
 		}
 	}
@@ -1661,7 +1628,6 @@ static int putback_zspage(struct size_class *class, struct zspage *zspage)
 
 	fullness = get_fullness_group(class, zspage);
 	insert_zspage(class, zspage, fullness);
-	set_zspage_mapping(zspage, class->index, fullness);
 
 	return fullness;
 }
@@ -1725,33 +1691,17 @@ static void migrate_read_unlock(struct zspage *zspage) __releases(&zspage->lock)
 	read_unlock(&zspage->lock);
 }
 
-#ifdef CONFIG_COMPACTION
 static void migrate_write_lock(struct zspage *zspage)
 {
 	write_lock(&zspage->lock);
 }
 
-static void migrate_write_lock_nested(struct zspage *zspage)
-{
-	write_lock_nested(&zspage->lock, SINGLE_DEPTH_NESTING);
-}
-
 static void migrate_write_unlock(struct zspage *zspage)
 {
 	write_unlock(&zspage->lock);
 }
 
-/* Number of isolated subpage for *page migration* in this zspage */
-static void inc_zspage_isolation(struct zspage *zspage)
-{
-	zspage->isolated++;
-}
-
-static void dec_zspage_isolation(struct zspage *zspage)
-{
-	VM_BUG_ON(zspage->isolated == 0);
-	zspage->isolated--;
-}
+#ifdef CONFIG_COMPACTION
 
 static const struct movable_operations zsmalloc_mops;
 
@@ -1780,21 +1730,12 @@ static void replace_sub_page(struct size_class *class, struct zspage *zspage,
 
 static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
 {
-	struct zs_pool *pool;
-	struct zspage *zspage;
-
 	/*
 	 * Page is locked so zspage couldn't be destroyed. For detail, look at
	 * lock_zspage in free_zspage.
 	 */
 	VM_BUG_ON_PAGE(PageIsolated(page), page);
 
-	zspage = get_zspage(page);
-	pool = zspage->pool;
-	spin_lock(&pool->lock);
-	inc_zspage_isolation(zspage);
-	spin_unlock(&pool->lock);
-
 	return true;
 }
 
@@ -1859,7 +1800,6 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 	kunmap_atomic(s_addr);
 
 	replace_sub_page(class, zspage, newpage, page);
-	dec_zspage_isolation(zspage);
 	/*
 	 * Since we complete the data copy and set up new zspage structure,
 	 * it's okay to release the pool's lock.
@@ -1881,16 +1821,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 
 static void zs_page_putback(struct page *page)
 {
-	struct zs_pool *pool;
-	struct zspage *zspage;
-
 	VM_BUG_ON_PAGE(!PageIsolated(page), page);
-
-	zspage = get_zspage(page);
-	pool = zspage->pool;
-	spin_lock(&pool->lock);
-	dec_zspage_isolation(zspage);
-	spin_unlock(&pool->lock);
 }
 
 static const struct movable_operations zsmalloc_mops = {
@@ -1907,8 +1838,6 @@ static void async_free_zspage(struct work_struct *work)
 {
 	int i;
 	struct size_class *class;
-	unsigned int class_idx;
-	int fullness;
 	struct zspage *zspage, *tmp;
 	LIST_HEAD(free_pages);
 	struct zs_pool *pool = container_of(work, struct zs_pool,
@@ -1929,10 +1858,8 @@ static void async_free_zspage(struct work_struct *work)
 		list_del(&zspage->list);
 		lock_zspage(zspage);
 
-		get_zspage_mapping(zspage, &class_idx, &fullness);
-		VM_BUG_ON(fullness != ZS_INUSE_RATIO_0);
-		class = pool->size_class[class_idx];
 		spin_lock(&pool->lock);
+		class = zspage_class(pool, zspage);
 		__free_zspage(pool, class, zspage);
 		spin_unlock(&pool->lock);
 	}
@@ -2006,19 +1933,17 @@ static unsigned long __zs_compact(struct zs_pool *pool,
 			dst_zspage = isolate_dst_zspage(class);
 			if (!dst_zspage)
 				break;
-			migrate_write_lock(dst_zspage);
 		}
 
 		src_zspage = isolate_src_zspage(class);
 		if (!src_zspage)
 			break;
 
-		migrate_write_lock_nested(src_zspage);
-
+		migrate_write_lock(src_zspage);
 		migrate_zspage(pool, src_zspage, dst_zspage);
-		fg = putback_zspage(class, src_zspage);
 		migrate_write_unlock(src_zspage);
+		fg = putback_zspage(class, src_zspage);
 
 		if (fg == ZS_INUSE_RATIO_0) {
 			free_zspage(pool, class, src_zspage);
 			pages_freed += class->pages_per_zspage;
@@ -2028,7 +1953,6 @@ static unsigned long __zs_compact(struct zs_pool *pool,
 		if (get_fullness_group(class, dst_zspage) == ZS_INUSE_RATIO_100
 		    || spin_is_contended(&pool->lock)) {
 			putback_zspage(class, dst_zspage);
-			migrate_write_unlock(dst_zspage);
 			dst_zspage = NULL;
 
 			spin_unlock(&pool->lock);
@@ -2037,15 +1961,12 @@
 		}
 	}
 
-	if (src_zspage) {
+	if (src_zspage)
 		putback_zspage(class, src_zspage);
-		migrate_write_unlock(src_zspage);
-	}
 
-	if (dst_zspage) {
+	if (dst_zspage)
 		putback_zspage(class, dst_zspage);
-		migrate_write_unlock(dst_zspage);
-	}
+
 	spin_unlock(&pool->lock);
 
 	return pages_freed;
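Note for readers skimming the diff: the obj_to_location()/location_to_obj() hunks drop the OBJ_TAG_BITS shift from the handle encoding, and a zspage now records its own class and fullness (set in alloc_zspage() and insert_zspage()), which is what makes get_zspage_mapping()/set_zspage_mapping() and the per-zspage isolated counter removable. The following standalone C sketch only illustrates that encoding and bookkeeping; the stand-in type fake_zspage and the placeholder PFN_BITS value of 52 are assumptions for the example, not the kernel's definitions (in the kernel, _PFN_BITS is derived from MAX_POSSIBLE_PHYSMEM_BITS and PAGE_SHIFT).

/* Illustrative sketch only -- stand-in types and constants, not kernel code. */
#include <assert.h>
#include <stdio.h>

#define BITS_PER_LONG   64
#define PFN_BITS        52                          /* placeholder for _PFN_BITS */
#define OBJ_INDEX_BITS  (BITS_PER_LONG - PFN_BITS)  /* no OBJ_TAG_BITS reserved */
#define OBJ_INDEX_MASK  ((1UL << OBJ_INDEX_BITS) - 1)

/* After this change, a zspage carries its own class index and fullness group. */
struct fake_zspage {
	unsigned int class;
	unsigned int fullness;
};

/* Pack <pfn, obj_idx> into one word, in the spirit of location_to_obj(). */
static unsigned long encode_obj(unsigned long pfn, unsigned int obj_idx)
{
	return (pfn << OBJ_INDEX_BITS) | (obj_idx & OBJ_INDEX_MASK);
}

/* Unpack it again, in the spirit of obj_to_location() (no tag shift any more). */
static void decode_obj(unsigned long obj, unsigned long *pfn, unsigned int *obj_idx)
{
	*pfn = obj >> OBJ_INDEX_BITS;
	*obj_idx = obj & OBJ_INDEX_MASK;
}

int main(void)
{
	unsigned long pfn;
	unsigned int idx;
	unsigned long obj = encode_obj(0x1234567UL, 3);

	decode_obj(obj, &pfn, &idx);
	assert(pfn == 0x1234567UL && idx == 3);

	/* Fullness is read straight from the zspage, as fix_fullness_group() now does. */
	struct fake_zspage zspage = { .class = 42, .fullness = 7 };
	printf("pfn=%#lx idx=%u class=%u fullness=%u\n",
	       pfn, idx, zspage.class, zspage.fullness);
	return 0;
}

The sketch keeps the same invariant the patch relies on: everything needed to classify a zspage lives in the zspage itself, so list movement helpers such as remove_zspage() no longer need the fullness passed in from callers.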