Diffstat (limited to 'mm')
 mm/slab_common.c | 57 ++++++++++++++++++++++++++-------------------------------
 1 file changed, 26 insertions(+), 31 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index b76d65d7fe33..db61df3b4282 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -484,31 +484,19 @@ fail:
 }
 EXPORT_SYMBOL(kmem_buckets_create);
 
-#ifdef SLAB_SUPPORTS_SYSFS
 /*
  * For a given kmem_cache, kmem_cache_destroy() should only be called
  * once or there will be a use-after-free problem. The actual deletion
  * and release of the kobject does not need slab_mutex or cpu_hotplug_lock
  * protection. So they are now done without holding those locks.
- *
- * Note that there will be a slight delay in the deletion of sysfs files
- * if kmem_cache_release() is called indirectly from a work function.
  */
 static void kmem_cache_release(struct kmem_cache *s)
 {
-	if (slab_state >= FULL) {
-		sysfs_slab_unlink(s);
+	if (__is_defined(SLAB_SUPPORTS_SYSFS) && slab_state >= FULL)
 		sysfs_slab_release(s);
-	} else {
+	else
 		slab_kmem_cache_release(s);
-	}
 }
-#else
-static void kmem_cache_release(struct kmem_cache *s)
-{
-	slab_kmem_cache_release(s);
-}
-#endif
 
 static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
 {
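
The new branch above folds the two #ifdef'd variants of kmem_cache_release() into one function: __is_defined() (from include/linux/kconfig.h) turns the preprocessor condition into a compile-time 0/1 constant, so both paths are always parsed and type-checked and the dead one is discarded. A standalone sketch of that trick, runnable as plain C; HAVE_SYSFS is a hypothetical stand-in for SLAB_SUPPORTS_SYSFS:

#include <stdio.h>

/*
 * The kconfig.h machinery, copied here for illustration: __is_defined(X)
 * expands to 1 when X is defined to the literal 1, and to 0 otherwise
 * (including when X is undefined or defined empty).
 */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)

#define HAVE_SYSFS 1	/* hypothetical stand-in; delete this line to flip the branch */

int main(void)
{
	/* Both arms compile; the untaken one is eliminated as dead code. */
	if (__is_defined(HAVE_SYSFS))
		puts("sysfs release path");
	else
		puts("plain release path");
	return 0;
}

Unlike #ifdef, which is also true for an empty define, __is_defined() yields 1 only for macros defined to 1; in exchange, the compiler always sees both branches.
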
@@ -534,7 +522,6 @@ static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
 	rcu_barrier();
 
 	list_for_each_entry_safe(s, s2, &to_destroy, list) {
-		debugfs_slab_release(s);
 		kfence_shutdown_cache(s);
 		kmem_cache_release(s);
 	}
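
The context lines above sit in slab_caches_to_rcu_destroy_workfn(), the work item that finishes off SLAB_TYPESAFE_BY_RCU caches; this hunk merely drops its debugfs call. For orientation, a self-contained sketch of the park-then-rcu_barrier() pattern that function implements, with hypothetical demo_* names standing in for the slab internals:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical stand-in for the slab internals. */
struct demo_cache {
	struct list_head list;
};

static LIST_HEAD(demo_to_destroy);	/* plays slab_caches_to_rcu_destroy */
static DEFINE_MUTEX(demo_lock);		/* plays slab_mutex */

static void demo_destroy_workfn(struct work_struct *work)
{
	struct demo_cache *c, *c2;
	LIST_HEAD(to_destroy);

	/* Detach the pending list so the lock is not held across rcu_barrier(). */
	mutex_lock(&demo_lock);
	list_splice_init(&demo_to_destroy, &to_destroy);
	mutex_unlock(&demo_lock);

	if (list_empty(&to_destroy))
		return;

	/*
	 * Wait for all RCU callbacks queued so far: after this, no
	 * RCU-deferred free can still reference the parked caches.
	 */
	rcu_barrier();

	list_for_each_entry_safe(c, c2, &to_destroy, list) {
		list_del(&c->list);
		kfree(c);	/* final release; the real code calls kmem_cache_release() */
	}
}
static DECLARE_WORK(demo_destroy_work, demo_destroy_workfn);

A destroyer parks a cache under the lock with list_add_tail() and then schedule_work(), exactly as the rcu_set branch in the last hunk below does.
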
@@ -549,8 +536,8 @@ void slab_kmem_cache_release(struct kmem_cache *s)
 
 void kmem_cache_destroy(struct kmem_cache *s)
 {
-	int err = -EBUSY;
 	bool rcu_set;
+	int err;
 
 	if (unlikely(!s) || !kasan_check_byte(s))
 		return;
@@ -558,11 +545,14 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	cpus_read_lock();
 	mutex_lock(&slab_mutex);
 
-	rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU;
-
 	s->refcount--;
-	if (s->refcount)
-		goto out_unlock;
+	if (s->refcount) {
+		mutex_unlock(&slab_mutex);
+		cpus_read_unlock();
+		return;
+	}
+
+	rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU;
 
 	/* free asan quarantined objects */
 	kasan_cache_shutdown(s);
@@ -571,24 +561,29 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	WARN(err, "%s %s: Slab cache still has objects when called from %pS",
 	     __func__, s->name, (void *)_RET_IP_);
 
-	if (err)
-		goto out_unlock;
-
 	list_del(&s->list);
 
-	if (rcu_set) {
-		list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
-		schedule_work(&slab_caches_to_rcu_destroy_work);
-	} else {
+	if (!err && !rcu_set)
 		kfence_shutdown_cache(s);
-		debugfs_slab_release(s);
-	}
-out_unlock:
 	mutex_unlock(&slab_mutex);
 	cpus_read_unlock();
-	if (!err && !rcu_set)
+
+	if (slab_state >= FULL)
+		sysfs_slab_unlink(s);
+	debugfs_slab_release(s);
+
+	if (err)
+		return;
+
+	if (rcu_set) {
+		mutex_lock(&slab_mutex);
+		list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
+		schedule_work(&slab_caches_to_rcu_destroy_work);
+		mutex_unlock(&slab_mutex);
+	} else {
 		kmem_cache_release(s);
+	}
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
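
Taken together, the new flow unlinks sysfs and debugfs entries synchronously and defers only the final release of SLAB_TYPESAFE_BY_RCU caches to the work item; the single-destroy rule from the retained comment still holds. A minimal, hypothetical user of the API (invented demo_* names, sketching correct usage rather than anything in this patch):

#include <linux/module.h>
#include <linux/slab.h>

struct demo_obj {
	int value;
};

static struct kmem_cache *demo_cachep;

static int __init demo_init(void)
{
	/* One cache, created once. */
	demo_cachep = kmem_cache_create("demo_obj", sizeof(struct demo_obj),
					0, SLAB_TYPESAFE_BY_RCU, NULL);
	if (!demo_cachep)
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	/*
	 * Exactly one kmem_cache_destroy() per cache: a second call on the
	 * same pointer is the use-after-free the comment in this patch warns
	 * about. With SLAB_TYPESAFE_BY_RCU the final release is deferred to
	 * the RCU work item, but after this patch the sysfs and debugfs
	 * entries disappear immediately.
	 */
	kmem_cache_destroy(demo_cachep);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");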