Diffstat (limited to 'mm/slab_common.c')
-rw-r--r--  mm/slab_common.c | 51 ++++++++++++++++++++++++++++++++++++++-------------
1 file changed, 38 insertions(+), 13 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 7eb8dc136c1c..81732d05e74a 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -406,8 +406,9 @@ out_free_cache:
 	goto out;
 }
 
-/*
- * kmem_cache_create_usercopy - Create a cache.
+/**
+ * kmem_cache_create_usercopy - Create a cache with a region suitable
+ * for copying to userspace
  * @name: A string which is used in /proc/slabinfo to identify this cache.
  * @size: The size of objects to be created in this cache.
  * @align: The required alignment for the objects.
@@ -416,7 +417,6 @@ out_free_cache:
  * @usersize: Usercopy region size
  * @ctor: A constructor for the objects.
  *
- * Returns a ptr to the cache on success, NULL on failure.
  * Cannot be called within a interrupt, but can be interrupted.
  * The @ctor is run when new pages are allocated by the cache.
  *
@@ -425,12 +425,14 @@ out_free_cache:
  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
  * to catch references to uninitialised memory.
  *
- * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
+ * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
  * for buffer overruns.
  *
  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
  * cacheline. This can be beneficial if you're counting cycles as closely
  * as davem.
+ *
+ * Return: a pointer to the cache on success, NULL on failure.
  */
 struct kmem_cache *
 kmem_cache_create_usercopy(const char *name,
@@ -514,6 +516,31 @@ out_unlock:
 }
 EXPORT_SYMBOL(kmem_cache_create_usercopy);
 
+/**
+ * kmem_cache_create - Create a cache.
+ * @name: A string which is used in /proc/slabinfo to identify this cache.
+ * @size: The size of objects to be created in this cache.
+ * @align: The required alignment for the objects.
+ * @flags: SLAB flags
+ * @ctor: A constructor for the objects.
+ *
+ * Cannot be called within a interrupt, but can be interrupted.
+ * The @ctor is run when new pages are allocated by the cache.
+ *
+ * The flags are
+ *
+ * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
+ * to catch references to uninitialised memory.
+ *
+ * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
+ * for buffer overruns.
+ *
+ * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
+ * cacheline. This can be beneficial if you're counting cycles as closely
+ * as davem.
+ *
+ * Return: a pointer to the cache on success, NULL on failure.
+ */
 struct kmem_cache *
 kmem_cache_create(const char *name, unsigned int size, unsigned int align,
 		slab_flags_t flags, void (*ctor)(void *))
@@ -724,7 +751,7 @@ void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
 	css_get(&s->memcg_params.memcg->css);
 
 	s->memcg_params.deact_fn = deact_fn;
-	call_rcu_sched(&s->memcg_params.deact_rcu_head, kmemcg_deactivate_rcufn);
+	call_rcu(&s->memcg_params.deact_rcu_head, kmemcg_deactivate_rcufn);
 }
 
 void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
@@ -839,11 +866,11 @@ static void flush_memcg_workqueue(struct kmem_cache *s)
 	mutex_unlock(&slab_mutex);
 
 	/*
-	 * SLUB deactivates the kmem_caches through call_rcu_sched. Make
+	 * SLUB deactivates the kmem_caches through call_rcu. Make
 	 * sure all registered rcu callbacks have been invoked.
 	 */
 	if (IS_ENABLED(CONFIG_SLUB))
-		rcu_barrier_sched();
+		rcu_barrier();
 
 	/*
 	 * SLAB and SLUB create memcg kmem_caches through workqueue and SLUB
@@ -1029,10 +1056,8 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
 
 		index = size_index[size_index_elem(size)];
 	} else {
-		if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
-			WARN_ON(1);
+		if (WARN_ON_ONCE(size > KMALLOC_MAX_CACHE_SIZE))
 			return NULL;
-		}
 		index = fls(size - 1);
 	}
 
@@ -1204,7 +1229,7 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 	page = alloc_pages(flags, order);
 	ret = page ? page_address(page) : NULL;
 	kmemleak_alloc(ret, size, 1, flags);
-	kasan_kmalloc_large(ret, size, flags);
+	ret = kasan_kmalloc_large(ret, size, flags);
 	return ret;
 }
 EXPORT_SYMBOL(kmalloc_order);
@@ -1482,7 +1507,7 @@ static __always_inline void *__do_krealloc(const void *p, size_t new_size,
 		ks = ksize(p);
 
 	if (ks >= new_size) {
-		kasan_krealloc((void *)p, new_size, flags);
+		p = kasan_krealloc((void *)p, new_size, flags);
 		return (void *)p;
 	}
 
@@ -1534,7 +1559,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
 	}
 
 	ret = __do_krealloc(p, new_size, flags);
-	if (ret && p != ret)
+	if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
 		kfree(p);
 
 	return ret;
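For context on the kernel-doc added above, here is a minimal usage sketch of kmem_cache_create_usercopy(): a cache whose objects mix kernel-private state with one field whitelisted for copying to userspace. The struct session type, its field names, and the cache name are hypothetical, invented for illustration; only the API call itself comes from the diff.

	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/slab.h>
	#include <linux/stddef.h>
	#include <linux/types.h>

	#define SESSION_TICKET_LEN 64

	/* Hypothetical object mixing kernel-private and user-visible data. */
	struct session {
		u64 internal_state;			/* must never leave the kernel */
		char ticket[SESSION_TICKET_LEN];	/* handed out via copy_to_user() */
	};

	static struct kmem_cache *session_cache;

	static int __init session_cache_init(void)
	{
		/*
		 * Whitelist only ticket[] as the usercopy region: with
		 * CONFIG_HARDENED_USERCOPY, a copy to or from userspace that
		 * strays outside [useroffset, useroffset + usersize) is
		 * rejected instead of silently exposing neighbouring fields.
		 */
		session_cache = kmem_cache_create_usercopy("session_cache",
				sizeof(struct session), 0, SLAB_HWCACHE_ALIGN,
				offsetof(struct session, ticket),
				SESSION_TICKET_LEN, NULL);
		return session_cache ? 0 : -ENOMEM;
	}
	module_init(session_cache_init);
	MODULE_LICENSE("GPL");

Passing useroffset = 0 and usersize = 0 marks the whole cache as off-limits to usercopy, which is how plain kmem_cache_create() is implemented in this file (it forwards to kmem_cache_create_usercopy() with a zero-sized region).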
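A second recurring motif in the kasan hunks (kmalloc_order(), __do_krealloc(), krealloc()): the KASAN hooks now return a pointer instead of being called purely for side effects, because with software tag-based KASAN the top byte of a pointer carries a tag, and a hook may hand back a retagged alias of the same object. Callers must therefore propagate the returned pointer, and pointer-identity checks must compare untagged addresses via kasan_reset_tag(). A hypothetical caller-side sketch (resize_buf() is invented for illustration):

	#include <linux/kasan.h>
	#include <linux/printk.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	/*
	 * Under tag-based KASAN, krealloc() can return the same allocation
	 * with a different pointer tag; "did the object move?" must therefore
	 * compare untagged addresses, exactly as the krealloc() hunk above
	 * now does before deciding whether to kfree() the old pointer.
	 */
	static void *resize_buf(void *buf, size_t new_size)
	{
		void *ret = krealloc(buf, new_size, GFP_KERNEL);

		if (ret && kasan_reset_tag(ret) != kasan_reset_tag(buf))
			pr_debug("object moved; krealloc() already freed the old pointer\n");
		return ret;
	}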