author    Alexander Popov <[email protected]>    2020-12-14 19:04:33 -0800
committer Linus Torvalds <[email protected]>    2020-12-15 12:13:37 -0800
commit    a32d654db543843a5ffb248feaec1a909718addd (patch)
tree      e7d4d79eb4ff3668d1c394ee4f709c76ccfea77d
parent    0c06dd75514327be4b1c22b109341ff7dfeeff98 (diff)
mm/slab: perform init_on_free earlier
Currently in CONFIG_SLAB init_on_free happens too late, and heap objects go to the heap quarantine without being erased. Let's move the init_on_free clearing before the kasan_slab_free() call, so that the heap quarantine stores erased objects, matching the CONFIG_SLUB=y behavior.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Alexander Popov <[email protected]>
Reviewed-by: Alexander Potapenko <[email protected]>
Acked-by: David Rientjes <[email protected]>
Acked-by: Joonsoo Kim <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Pekka Enberg <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
-rw-r--r--  mm/slab.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
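For illustration, the ordering this patch establishes can be sketched as a minimal standalone C model. This is only a sketch under stated assumptions: struct cache, want_init_on_free, and quarantine_put are hypothetical stand-ins for the kernel's struct kmem_cache, slab_want_init_on_free(), and kasan_slab_free() that appear in the diff below.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Hypothetical stand-in for struct kmem_cache. */
struct cache {
        size_t object_size;
        bool init_on_free;      /* models slab_want_init_on_free(cachep) */
};

/* Models kasan_slab_free(): returns true when the object has been
 * placed in the quarantine and must not be touched any further. */
static bool quarantine_put(void *objp)
{
        (void)objp;
        return true;
}

/* Free path with the patch applied: erase first, quarantine second. */
static void cache_free(struct cache *c, void *objp)
{
        /* Zero the object before it can reach the quarantine, so the
         * quarantine only ever holds erased memory. */
        if (c->init_on_free)
                memset(objp, 0, c->object_size);

        /* Put the object into the quarantine, don't touch it for now. */
        if (quarantine_put(objp))
                return;

        /* ... the normal free path would continue here ... */
}

Before the patch, the memset() lived in ___cache_free(), which only runs after kasan_slab_free() has declined to quarantine the object, so quarantined objects kept their stale contents; CONFIG_SLUB=y already erased objects before quarantining them.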
diff --git a/mm/slab.c b/mm/slab.c
index 2e67a513b0c9..176b65e2157d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3417,6 +3417,9 @@ free_done:
 static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
                                          unsigned long caller)
 {
+       if (unlikely(slab_want_init_on_free(cachep)))
+               memset(objp, 0, cachep->object_size);
+
        /* Put the object into the quarantine, don't touch it for now. */
        if (kasan_slab_free(cachep, objp, _RET_IP_))
                return;
@@ -3435,8 +3438,6 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
        struct array_cache *ac = cpu_cache_get(cachep);

        check_irq_off();
-       if (unlikely(slab_want_init_on_free(cachep)))
-               memset(objp, 0, cachep->object_size);
        kmemleak_free_recursive(objp, cachep->flags);
        objp = cache_free_debugcheck(cachep, objp, caller);
        memcg_slab_free_hook(cachep, &objp, 1);