author	Mark Brown <broonie@kernel.org>	2020-12-11 17:47:55 +0000
committer	Mark Brown <broonie@kernel.org>	2020-12-11 17:47:55 +0000
commit	031616c434db05ce766f76c62865f55698e0924f (patch)
tree	7f29aa1ff3e7b51a8058cd570fb785c6e769b245 /mm/slab.c
parent	064841ccfc49b2315dc0b797239862d3a343aa07 (diff)
parent	85a7555575a0e48f9b73db310d0d762a08a46d63 (diff)
Merge remote-tracking branch 'asoc/for-5.10' into asoc-linus
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	6
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index f658e86ec8ce..b1113561b98b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1062,7 +1062,7 @@ int slab_prepare_cpu(unsigned int cpu)
* Even if all the cpus of a node are down, we don't free the
* kmem_cache_node of any cache. This to avoid a race between cpu_down, and
* a kmalloc allocation from another cpu for memory from the node of
- * the cpu going down. The list3 structure is usually allocated from
+ * the cpu going down. The kmem_cache_node structure is usually allocated from
* kmem_cache_create() and gets destroyed at kmem_cache_destroy().
*/
int slab_dead_cpu(unsigned int cpu)
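The comment fix above renames the long-gone "list3" to its current name, kmem_cache_node. For context, the function this comment documents is tiny; a paraphrased sketch of slab_dead_cpu() from this era of mm/slab.c (simplified, not part of this diff):

int slab_dead_cpu(unsigned int cpu)
{
	/*
	 * Tear down per-cpu state, but deliberately leave every cache's
	 * kmem_cache_node alive: a concurrent kmalloc from another cpu
	 * may still be allocating from this node.
	 */
	mutex_lock(&slab_mutex);
	cpuup_canceled(cpu);
	mutex_unlock(&slab_mutex);
	return 0;
}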
@@ -2305,8 +2305,6 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
/* Slab management obj is off-slab. */
freelist = kmem_cache_alloc_node(cachep->freelist_cache,
local_flags, nodeid);
- if (!freelist)
- return NULL;
} else {
/* We will use last bytes at the slab for freelist */
freelist = addr + (PAGE_SIZE << cachep->gfporder) -
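The dropped NULL check is redundant: alloc_slabmgmt() ends with return freelist, so a failed off-slab allocation already hands NULL back to the caller, which checks the result itself. A simplified, paraphrased sketch of the resulting control flow (not verbatim kernel code; some branches and setup omitted):

static void *alloc_slabmgmt(struct kmem_cache *cachep, struct page *page,
			    int colour_off, gfp_t local_flags, int nodeid)
{
	void *freelist;
	void *addr = page_address(page);

	if (OFF_SLAB(cachep)) {
		/* Slab management obj is off-slab; this may return NULL */
		freelist = kmem_cache_alloc_node(cachep->freelist_cache,
						 local_flags, nodeid);
	} else {
		/* We will use last bytes at the slab for freelist */
		freelist = addr + (PAGE_SIZE << cachep->gfporder) -
			   cachep->freelist_size;
	}
	return freelist;	/* NULL from the off-slab path propagates here */
}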
@@ -3440,7 +3438,7 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
memset(objp, 0, cachep->object_size);
kmemleak_free_recursive(objp, cachep->flags);
objp = cache_free_debugcheck(cachep, objp, caller);
- memcg_slab_free_hook(cachep, virt_to_head_page(objp), objp);
+ memcg_slab_free_hook(cachep, &objp, 1);
/*
* Skip calling cache_free_alien() when the platform is not numa.
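The memcg_slab_free_hook() change tracks an upstream conversion of the hook to a batched signature, an object-pointer array plus a count, so bulk-free paths can uncharge a whole batch in one call; a single free now passes &objp with a count of 1. A minimal illustrative sketch assuming that signature (example_bulk_uncharge() is hypothetical, not a kernel function):

/* Hypothetical caller: with the batched hook, a bulk-free path hands
 * over its whole pointer array at once instead of invoking the hook,
 * and a page lookup, once per object. */
static void example_bulk_uncharge(struct kmem_cache *s, int nr, void **objs)
{
	memcg_slab_free_hook(s, objs, nr);	/* uncharge all nr objects */
	/* ... per-object freeing continues as before ... */
}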