author		Vlastimil Babka <vbabka@suse.cz>	2024-08-07 12:31:15 +0200
committer	Vlastimil Babka <vbabka@suse.cz>	2024-08-27 14:12:50 +0200
commit		4ec10268ed98a3d568a39861e7b7d0a0fa7cbe60
tree		2929fbd2e05b522cfb1d96a8214b9609f02adfba /mm
parent		b5959789e96e01b4b27a6d0354b475398d67aa6f
mm, slab: unlink slabinfo, sysfs and debugfs immediately
kmem_cache_destroy() includes removing the associated sysfs and debugfs
directories, and the cache from the list of caches that appears in
/proc/slabinfo. Currently this might not happen immediately when:
- the cache is SLAB_TYPESAFE_BY_RCU and the cleanup is delayed,
  including the removal of the directories
- __kmem_cache_shutdown() fails due to outstanding objects; the
  directories then remain indefinitely
When a cache is recreated with the same name, such as after a module
unload followed by a reload, the directories fail to be created for the
new instance of the cache because the old directories are still
present. The cache will also appear twice in /proc/slabinfo.
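
As an illustration of the reload scenario, here is a minimal,
hypothetical module sketch (module name, cache name, object size and
the SLAB_TYPESAFE_BY_RCU flag are made up and not part of this patch).
Unloading and reloading such a module while the old cache's cleanup is
still pending, or after a failed __kmem_cache_shutdown(), would
previously hit the stale directories:

  #include <linux/module.h>
  #include <linux/slab.h>

  static struct kmem_cache *demo_cache;

  static int __init demo_init(void)
  {
  	/* Recreated with the same name on every module load. */
  	demo_cache = kmem_cache_create("demo_cache", 128, 0,
  				       SLAB_TYPESAFE_BY_RCU, NULL);
  	return demo_cache ? 0 : -ENOMEM;
  }

  static void __exit demo_exit(void)
  {
  	/*
  	 * Before this patch, the sysfs/debugfs directories and the
  	 * /proc/slabinfo entry could outlive this call (RCU-delayed
  	 * cleanup, or a failed __kmem_cache_shutdown()), breaking the
  	 * next load of this module.
  	 */
  	kmem_cache_destroy(demo_cache);
  }

  module_init(demo_init);
  module_exit(demo_exit);
  MODULE_LICENSE("GPL");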
While we want to convert the SLAB_TYPESAFE_BY_RCU cleanup to be
synchronous again, the second point remains. So let's fix this first and
have the directories and the slabinfo entry removed immediately in
kmem_cache_destroy(), regardless of __kmem_cache_shutdown() success.
This should not make debugging harder if __kmem_cache_shutdown() fails,
because a detailed report of outstanding objects is printed into dmesg
already due to the failure.
Also simplify kmem_cache_release() sysfs handling by using
__is_defined(SLAB_SUPPORTS_SYSFS).
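
For reference, __is_defined() is the macro trick from the kernel's
include/linux/kconfig.h that turns "is this macro defined to 1" into a
compile-time 0/1 constant, so the #ifdef/#else duplication of
kmem_cache_release() can collapse into an ordinary C conditional. A
standalone userspace reproduction of the pattern (the FEATURE_* macros
are made up for the demo):

  #include <stdio.h>

  /*
   * Reproduction of __is_defined() from include/linux/kconfig.h: it
   * evaluates to the constant 1 when the tested macro is defined to 1,
   * and to 0 otherwise, so both branches of a plain `if` keep compiling
   * and the #ifdef goes away.
   */
  #define __ARG_PLACEHOLDER_1 0,
  #define __take_second_arg(__ignored, val, ...) val
  #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
  #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
  #define __is_defined(x) ___is_defined(x)

  #define FEATURE_A 1
  /* FEATURE_B deliberately not defined */

  int main(void)
  {
  	printf("FEATURE_A: %d\n", __is_defined(FEATURE_A));	/* prints 1 */
  	printf("FEATURE_B: %d\n", __is_defined(FEATURE_B));	/* prints 0 */
  	return 0;
  }

The resulting condition is a compile-time constant, so the unused
branch is dead-code-eliminated while still being type-checked.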
Note the resulting code in kmem_cache_destroy() is a bit ugly but will
be further simplified - this is in order to make small bisectable steps.
Reviewed-by: Jann Horn <jannh@google.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Diffstat (limited to 'mm')
-rw-r--r--	mm/slab_common.c	57
1 file changed, 26 insertions(+), 31 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index b76d65d7fe33..db61df3b4282 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -484,31 +484,19 @@ fail:
 }
 EXPORT_SYMBOL(kmem_buckets_create);
 
-#ifdef SLAB_SUPPORTS_SYSFS
 /*
  * For a given kmem_cache, kmem_cache_destroy() should only be called
  * once or there will be a use-after-free problem. The actual deletion
  * and release of the kobject does not need slab_mutex or cpu_hotplug_lock
  * protection. So they are now done without holding those locks.
- *
- * Note that there will be a slight delay in the deletion of sysfs files
- * if kmem_cache_release() is called indrectly from a work function.
  */
 static void kmem_cache_release(struct kmem_cache *s)
 {
-	if (slab_state >= FULL) {
-		sysfs_slab_unlink(s);
+	if (__is_defined(SLAB_SUPPORTS_SYSFS) && slab_state >= FULL)
 		sysfs_slab_release(s);
-	} else {
+	else
 		slab_kmem_cache_release(s);
-	}
 }
-#else
-static void kmem_cache_release(struct kmem_cache *s)
-{
-	slab_kmem_cache_release(s);
-}
-#endif
 
 static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
 {
@@ -534,7 +522,6 @@ static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
 	rcu_barrier();
 
 	list_for_each_entry_safe(s, s2, &to_destroy, list) {
-		debugfs_slab_release(s);
 		kfence_shutdown_cache(s);
 		kmem_cache_release(s);
 	}
@@ -549,8 +536,8 @@ void slab_kmem_cache_release(struct kmem_cache *s)
 
 void kmem_cache_destroy(struct kmem_cache *s)
 {
-	int err = -EBUSY;
 	bool rcu_set;
+	int err;
 
 	if (unlikely(!s) || !kasan_check_byte(s))
 		return;
@@ -558,11 +545,14 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	cpus_read_lock();
 	mutex_lock(&slab_mutex);
 
-	rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU;
-
 	s->refcount--;
-	if (s->refcount)
-		goto out_unlock;
+	if (s->refcount) {
+		mutex_unlock(&slab_mutex);
+		cpus_read_unlock();
+		return;
+	}
+
+	rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU;
 
 	/* free asan quarantined objects */
 	kasan_cache_shutdown(s);
@@ -571,24 +561,29 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	WARN(err, "%s %s: Slab cache still has objects when called from %pS",
 	     __func__, s->name, (void *)_RET_IP_);
 
-	if (err)
-		goto out_unlock;
-
 	list_del(&s->list);
 
-	if (rcu_set) {
-		list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
-		schedule_work(&slab_caches_to_rcu_destroy_work);
-	} else {
+	if (!err && !rcu_set)
 		kfence_shutdown_cache(s);
-		debugfs_slab_release(s);
-	}
 
-out_unlock:
 	mutex_unlock(&slab_mutex);
 	cpus_read_unlock();
-	if (!err && !rcu_set)
+
+	if (slab_state >= FULL)
+		sysfs_slab_unlink(s);
+	debugfs_slab_release(s);
+
+	if (err)
+		return;
+
+	if (rcu_set) {
+		mutex_lock(&slab_mutex);
+		list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
+		schedule_work(&slab_caches_to_rcu_destroy_work);
+		mutex_unlock(&slab_mutex);
+	} else {
 		kmem_cache_release(s);
+	}
 }
 EXPORT_SYMBOL(kmem_cache_destroy);