Diffstat (limited to 'mm/slab_common.c')
-rw-r--r--  mm/slab_common.c  48
1 file changed, 16 insertions, 32 deletions
diff --git a/mm/slab_common.c b/mm/slab_common.c
index e5d080a93009..23f2ab0713b7 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -489,9 +489,7 @@ void slab_kmem_cache_release(struct kmem_cache *s)
 
 void kmem_cache_destroy(struct kmem_cache *s)
 {
-	int err;
-
-	if (unlikely(!s))
+	if (unlikely(!s) || !kasan_check_byte(s))
 		return;
 
 	cpus_read_lock();
@@ -501,12 +499,9 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	if (s->refcount)
 		goto out_unlock;
 
-	err = shutdown_cache(s);
-	if (err) {
-		pr_err("%s %s: Slab cache still has objects\n",
-		       __func__, s->name);
-		dump_stack();
-	}
+	WARN(shutdown_cache(s),
+	     "%s %s: Slab cache still has objects when called from %pS",
+	     __func__, s->name, (void *)_RET_IP_);
 out_unlock:
 	mutex_unlock(&slab_mutex);
 	cpus_read_unlock();
@@ -550,13 +545,13 @@ bool slab_is_available(void)
  */
 bool kmem_valid_obj(void *object)
 {
-	struct page *page;
+	struct folio *folio;
 
 	/* Some arches consider ZERO_SIZE_PTR to be a valid address. */
 	if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
 		return false;
-	page = virt_to_head_page(object);
-	return PageSlab(page);
+	folio = virt_to_folio(object);
+	return folio_test_slab(folio);
 }
 EXPORT_SYMBOL_GPL(kmem_valid_obj);
 
@@ -579,18 +574,18 @@ void kmem_dump_obj(void *object)
 {
 	char *cp = IS_ENABLED(CONFIG_MMU) ? "" : "/vmalloc";
 	int i;
-	struct page *page;
+	struct slab *slab;
 	unsigned long ptroffset;
 	struct kmem_obj_info kp = { };
 
 	if (WARN_ON_ONCE(!virt_addr_valid(object)))
 		return;
-	page = virt_to_head_page(object);
-	if (WARN_ON_ONCE(!PageSlab(page))) {
+	slab = virt_to_slab(object);
+	if (WARN_ON_ONCE(!slab)) {
 		pr_cont(" non-slab memory.\n");
 		return;
 	}
-	kmem_obj_info(&kp, object, page);
+	kmem_obj_info(&kp, object, slab);
 	if (kp.kp_slab_cache)
 		pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
 	else
@@ -824,7 +819,7 @@ void __init setup_kmalloc_cache_index_table(void)
 
 	if (KMALLOC_MIN_SIZE >= 64) {
 		/*
-		 * The 96 byte size cache is not used if the alignment
+		 * The 96 byte sized cache is not used if the alignment
 		 * is 64 byte.
 		 */
 		for (i = 64 + 8; i <= 96; i += 8)
@@ -849,7 +844,7 @@ new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
 	if (type == KMALLOC_RECLAIM) {
 		flags |= SLAB_RECLAIM_ACCOUNT;
 	} else if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_CGROUP)) {
-		if (cgroup_memory_nokmem) {
+		if (mem_cgroup_kmem_disabled()) {
 			kmalloc_caches[type][idx] = kmalloc_caches[KMALLOC_NORMAL][idx];
 			return;
 		}
@@ -1044,18 +1039,18 @@ static void print_slabinfo_header(struct seq_file *m)
 	seq_putc(m, '\n');
 }
 
-void *slab_start(struct seq_file *m, loff_t *pos)
+static void *slab_start(struct seq_file *m, loff_t *pos)
 {
 	mutex_lock(&slab_mutex);
 	return seq_list_start(&slab_caches, *pos);
 }
 
-void *slab_next(struct seq_file *m, void *p, loff_t *pos)
+static void *slab_next(struct seq_file *m, void *p, loff_t *pos)
 {
 	return seq_list_next(p, &slab_caches, pos);
 }
 
-void slab_stop(struct seq_file *m, void *p)
+static void slab_stop(struct seq_file *m, void *p)
 {
 	mutex_unlock(&slab_mutex);
 }
@@ -1123,17 +1118,6 @@ void dump_unreclaimable_slab(void)
 	mutex_unlock(&slab_mutex);
 }
 
-#if defined(CONFIG_MEMCG_KMEM)
-int memcg_slab_show(struct seq_file *m, void *p)
-{
-	/*
-	 * Deprecated.
-	 * Please, take a look at tools/cgroup/slabinfo.py .
-	 */
-	return 0;
-}
-#endif
-
 /*
  * slabinfo_op - iterator that generates /proc/slabinfo
  *