Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.h          4
-rw-r--r--  mm/slab_common.c  139
-rw-r--r--  mm/slub.c         160
3 files changed, 127 insertions, 176 deletions
diff --git a/mm/slab.h b/mm/slab.h
index 9f907e930609..f22fb760b286 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -422,7 +422,9 @@ kmalloc_slab(size_t size, kmem_buckets *b, gfp_t flags, unsigned long caller)
 gfp_t kmalloc_fix_flags(gfp_t flags);
 
 /* Functions provided by the slab allocators */
-int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);
+int do_kmem_cache_create(struct kmem_cache *s, const char *name,
+			 unsigned int size, struct kmem_cache_args *args,
+			 slab_flags_t flags);
 
 void __init kmem_cache_init(void);
 extern void create_boot_cache(struct kmem_cache *, const char *name,
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 11ef221bce17..61f32420230a 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -215,32 +215,29 @@ struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
 }
 
 static struct kmem_cache *create_cache(const char *name,
-				       unsigned int object_size, unsigned int align,
-				       slab_flags_t flags, unsigned int useroffset,
-				       unsigned int usersize, void (*ctor)(void *),
-				       struct kmem_cache *root_cache)
+				       unsigned int object_size,
+				       struct kmem_cache_args *args,
+				       slab_flags_t flags)
 {
 	struct kmem_cache *s;
 	int err;
 
-	if (WARN_ON(useroffset + usersize > object_size))
-		useroffset = usersize = 0;
+	if (WARN_ON(args->useroffset + args->usersize > object_size))
+		args->useroffset = args->usersize = 0;
+
+	/* If a custom freelist pointer is requested make sure it's sane. */
+	err = -EINVAL;
+	if (args->use_freeptr_offset &&
+	    (args->freeptr_offset >= object_size ||
+	     !(flags & SLAB_TYPESAFE_BY_RCU) ||
+	     !IS_ALIGNED(args->freeptr_offset, sizeof(freeptr_t))))
+		goto out;
 
 	err = -ENOMEM;
 	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
 	if (!s)
 		goto out;
-
-	s->name = name;
-	s->size = s->object_size = object_size;
-	s->align = align;
-	s->ctor = ctor;
-#ifdef CONFIG_HARDENED_USERCOPY
-	s->useroffset = useroffset;
-	s->usersize = usersize;
-#endif
-
-	err = __kmem_cache_create(s, flags);
+	err = do_kmem_cache_create(s, name, object_size, args, flags);
 	if (err)
 		goto out_free_cache;
 
@@ -255,39 +252,24 @@ out:
 }
 
 /**
- * kmem_cache_create_usercopy - Create a cache with a region suitable
- * for copying to userspace
+ * __kmem_cache_create_args - Create a kmem cache.
  * @name: A string which is used in /proc/slabinfo to identify this cache.
- * @size: The size of objects to be created in this cache.
- * @align: The required alignment for the objects.
- * @flags: SLAB flags
- * @useroffset: Usercopy region offset
- * @usersize: Usercopy region size
- * @ctor: A constructor for the objects.
+ * @object_size: The size of objects to be created in this cache.
+ * @args: Additional arguments for the cache creation (see
+ *        &struct kmem_cache_args).
+ * @flags: See %SLAB_* flags for an explanation of individual @flags.
  *
- * Cannot be called within a interrupt, but can be interrupted.
- * The @ctor is run when new pages are allocated by the cache.
+ * Not to be called directly, use the kmem_cache_create() wrapper with the same
+ * parameters.
  *
- * The flags are
- *
- * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
- * to catch references to uninitialised memory.
- *
- * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
- * for buffer overruns.
- *
- * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
- * cacheline. This can be beneficial if you're counting cycles as closely
- * as davem.
+ * Context: Cannot be called within a interrupt, but can be interrupted.
  *
  * Return: a pointer to the cache on success, NULL on failure.
  */
-struct kmem_cache *
-kmem_cache_create_usercopy(const char *name,
-		unsigned int size, unsigned int align,
-		slab_flags_t flags,
-		unsigned int useroffset, unsigned int usersize,
-		void (*ctor)(void *))
+struct kmem_cache *__kmem_cache_create_args(const char *name,
+					    unsigned int object_size,
+					    struct kmem_cache_args *args,
+					    slab_flags_t flags)
 {
 	struct kmem_cache *s = NULL;
 	const char *cache_name;
@@ -309,7 +291,7 @@ kmem_cache_create_usercopy(const char *name,
 
 	mutex_lock(&slab_mutex);
 
-	err = kmem_cache_sanity_check(name, size);
+	err = kmem_cache_sanity_check(name, object_size);
 	if (err) {
 		goto out_unlock;
 	}
@@ -330,12 +312,14 @@ kmem_cache_create_usercopy(const char *name,
 
 	/* Fail closed on bad usersize of useroffset values. */
 	if (!IS_ENABLED(CONFIG_HARDENED_USERCOPY) ||
-	    WARN_ON(!usersize && useroffset) ||
-	    WARN_ON(size < usersize || size - usersize < useroffset))
-		usersize = useroffset = 0;
-
-	if (!usersize)
-		s = __kmem_cache_alias(name, size, align, flags, ctor);
+	    WARN_ON(!args->usersize && args->useroffset) ||
+	    WARN_ON(object_size < args->usersize ||
+		    object_size - args->usersize < args->useroffset))
+		args->usersize = args->useroffset = 0;
+
+	if (!args->usersize)
+		s = __kmem_cache_alias(name, object_size, args->align, flags,
+				       args->ctor);
 	if (s)
 		goto out_unlock;
 
@@ -345,9 +329,8 @@ kmem_cache_create_usercopy(const char *name,
 		goto out_unlock;
 	}
 
-	s = create_cache(cache_name, size,
-			 calculate_alignment(flags, align, size),
-			 flags, useroffset, usersize, ctor, NULL);
+	args->align = calculate_alignment(flags, args->align, object_size);
+	s = create_cache(cache_name, object_size, args, flags);
 	if (IS_ERR(s)) {
 		err = PTR_ERR(s);
 		kfree_const(cache_name);
@@ -369,41 +352,7 @@ out_unlock:
 	}
 	return s;
 }
-EXPORT_SYMBOL(kmem_cache_create_usercopy);
-
-/**
- * kmem_cache_create - Create a cache.
- * @name: A string which is used in /proc/slabinfo to identify this cache.
- * @size: The size of objects to be created in this cache.
- * @align: The required alignment for the objects.
- * @flags: SLAB flags
- * @ctor: A constructor for the objects.
- *
- * Cannot be called within a interrupt, but can be interrupted.
- * The @ctor is run when new pages are allocated by the cache.
- *
- * The flags are
- *
- * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
- * to catch references to uninitialised memory.
- *
- * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
- * for buffer overruns.
- *
- * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
- * cacheline. This can be beneficial if you're counting cycles as closely
- * as davem.
- *
- * Return: a pointer to the cache on success, NULL on failure.
- */
-struct kmem_cache *
-kmem_cache_create(const char *name, unsigned int size, unsigned int align,
-		slab_flags_t flags, void (*ctor)(void *))
-{
-	return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
-					  ctor);
-}
-EXPORT_SYMBOL(kmem_cache_create);
+EXPORT_SYMBOL(__kmem_cache_create_args);
 
 static struct kmem_cache *kmem_buckets_cache __ro_after_init;
 
@@ -689,9 +638,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name,
 {
 	int err;
 	unsigned int align = ARCH_KMALLOC_MINALIGN;
-
-	s->name = name;
-	s->size = s->object_size = size;
+	struct kmem_cache_args kmem_args = {};
 
 	/*
 	 * kmalloc caches guarantee alignment of at least the largest
@@ -700,14 +647,14 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name,
 	 */
 	if (flags & SLAB_KMALLOC)
 		align = max(align, 1U << (ffs(size) - 1));
-	s->align = calculate_alignment(flags, align, size);
+	kmem_args.align = calculate_alignment(flags, align, size);
 
 #ifdef CONFIG_HARDENED_USERCOPY
-	s->useroffset = useroffset;
-	s->usersize = usersize;
+	kmem_args.useroffset = useroffset;
+	kmem_args.usersize = usersize;
#endif
 
-	err = __kmem_cache_create(s, flags);
+	err = do_kmem_cache_create(s, name, size, &kmem_args, flags);
 
 	if (err)
 		panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",
diff --git a/mm/slub.c b/mm/slub.c
index 81cea762d094..3d454888e921 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -466,12 +466,6 @@ static struct workqueue_struct *flushwq;
  *******************************************************************/
 
 /*
- * freeptr_t represents a SLUB freelist pointer, which might be encoded
- * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
- */
-typedef struct { unsigned long v; } freeptr_t;
-
-/*
  * Returns freelist pointer (ptr). With hardening, this is obfuscated
  * with an XOR of the address where the pointer is held and a per-cache
  * random number.
@@ -4019,6 +4013,8 @@ static void *__slab_alloc_node(struct kmem_cache *s,
 /*
  * If the object has been wiped upon free, make sure it's fully initialized by
  * zeroing out freelist pointer.
+ *
+ * Note that we also wipe custom freelist pointers.
  */
 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
 						   void *obj)
@@ -5282,7 +5278,7 @@ static void set_cpu_partial(struct kmem_cache *s)
  * calculate_sizes() determines the order and the distribution of data within
  * a slab object.
  */
-static int calculate_sizes(struct kmem_cache *s)
+static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s)
 {
 	slab_flags_t flags = s->flags;
 	unsigned int size = s->object_size;
@@ -5323,7 +5319,8 @@ static int calculate_sizes(struct kmem_cache *s)
 	 */
 	s->inuse = size;
 
-	if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) || s->ctor ||
+	if (((flags & SLAB_TYPESAFE_BY_RCU) && !args->use_freeptr_offset) ||
+	    (flags & SLAB_POISON) || s->ctor ||
 	    ((flags & SLAB_RED_ZONE) && (s->object_size < sizeof(void *) ||
 	     slub_debug_orig_size(s)))) {
 		/*
@@ -5344,6 +5341,8 @@ static int calculate_sizes(struct kmem_cache *s)
 		 */
 		s->offset = size;
 		size += sizeof(void *);
+	} else if ((flags & SLAB_TYPESAFE_BY_RCU) && args->use_freeptr_offset) {
+		s->offset = args->freeptr_offset;
 	} else {
 		/*
 		 * Store freelist pointer near middle of object to keep
@@ -5418,65 +5417,6 @@ static int calculate_sizes(struct kmem_cache *s)
 	return !!oo_objects(s->oo);
 }
 
-static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
-{
-	s->flags = kmem_cache_flags(flags, s->name);
-#ifdef CONFIG_SLAB_FREELIST_HARDENED
-	s->random = get_random_long();
-#endif
-
-	if (!calculate_sizes(s))
-		goto error;
-	if (disable_higher_order_debug) {
-		/*
-		 * Disable debugging flags that store metadata if the min slab
-		 * order increased.
-		 */
-		if (get_order(s->size) > get_order(s->object_size)) {
-			s->flags &= ~DEBUG_METADATA_FLAGS;
-			s->offset = 0;
-			if (!calculate_sizes(s))
-				goto error;
-		}
-	}
-
-#ifdef system_has_freelist_aba
-	if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
-		/* Enable fast mode */
-		s->flags |= __CMPXCHG_DOUBLE;
-	}
-#endif
-
-	/*
-	 * The larger the object size is, the more slabs we want on the partial
-	 * list to avoid pounding the page allocator excessively.
-	 */
-	s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
-	s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
-
-	set_cpu_partial(s);
-
-#ifdef CONFIG_NUMA
-	s->remote_node_defrag_ratio = 1000;
-#endif
-
-	/* Initialize the pre-computed randomized freelist if slab is up */
-	if (slab_state >= UP) {
-		if (init_cache_random_seq(s))
-			goto error;
-	}
-
-	if (!init_kmem_cache_nodes(s))
-		goto error;
-
-	if (alloc_kmem_cache_cpus(s))
-		return 0;
-
-error:
-	__kmem_cache_release(s);
-	return -EINVAL;
-}
-
 static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
 			      const char *text)
 {
@@ -6030,28 +5970,90 @@ __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
 	return s;
 }
 
-int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
+int do_kmem_cache_create(struct kmem_cache *s, const char *name,
+			 unsigned int size, struct kmem_cache_args *args,
+			 slab_flags_t flags)
 {
-	int err;
+	int err = -EINVAL;
 
-	err = kmem_cache_open(s, flags);
-	if (err)
-		return err;
+	s->name = name;
+	s->size = s->object_size = size;
+
+	s->flags = kmem_cache_flags(flags, s->name);
+#ifdef CONFIG_SLAB_FREELIST_HARDENED
+	s->random = get_random_long();
+#endif
+	s->align = args->align;
+	s->ctor = args->ctor;
+#ifdef CONFIG_HARDENED_USERCOPY
+	s->useroffset = args->useroffset;
+	s->usersize = args->usersize;
+#endif
+
+	if (!calculate_sizes(args, s))
+		goto out;
+	if (disable_higher_order_debug) {
+		/*
+		 * Disable debugging flags that store metadata if the min slab
+		 * order increased.
+		 */
+		if (get_order(s->size) > get_order(s->object_size)) {
+			s->flags &= ~DEBUG_METADATA_FLAGS;
+			s->offset = 0;
+			if (!calculate_sizes(args, s))
+				goto out;
+		}
+	}
+
+#ifdef system_has_freelist_aba
+	if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
+		/* Enable fast mode */
+		s->flags |= __CMPXCHG_DOUBLE;
+	}
+#endif
+
+	/*
+	 * The larger the object size is, the more slabs we want on the partial
+	 * list to avoid pounding the page allocator excessively.
+	 */
+	s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
+	s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
+
+	set_cpu_partial(s);
+
+#ifdef CONFIG_NUMA
+	s->remote_node_defrag_ratio = 1000;
+#endif
+
+	/* Initialize the pre-computed randomized freelist if slab is up */
+	if (slab_state >= UP) {
+		if (init_cache_random_seq(s))
+			goto out;
+	}
+
+	if (!init_kmem_cache_nodes(s))
+		goto out;
+
+	if (!alloc_kmem_cache_cpus(s))
+		goto out;
 
 	/* Mutex is not taken during early boot */
-	if (slab_state <= UP)
-		return 0;
+	if (slab_state <= UP) {
+		err = 0;
+		goto out;
+	}
 
 	err = sysfs_slab_add(s);
-	if (err) {
-		__kmem_cache_release(s);
-		return err;
-	}
+	if (err)
+		goto out;
 
 	if (s->flags & SLAB_STORE_USER)
 		debugfs_slab_add(s);
 
-	return 0;
+out:
+	if (err)
+		__kmem_cache_release(s);
+	return err;
 }
 
 #ifdef SLAB_SUPPORTS_SYSFS
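
The practical upshot of the slab_common.c changes above is that everything kmem_cache_create_usercopy() used to take as positional parameters (alignment, constructor, usercopy window) now travels in struct kmem_cache_args. Below is a minimal sketch of a usercopy-enabled cache on top of the new entry point; the field names are taken from this diff, but struct foo, foo_cachep and foo_cache_init() are hypothetical, and real callers are expected to go through the kmem_cache_create() wrapper mentioned in the new kerneldoc rather than calling __kmem_cache_create_args() directly.

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/types.h>

/* Hypothetical object with a window that may be copied to/from userspace. */
struct foo {
	u32 flags;
	char user_data[64];		/* usercopy window */
	void *private;
};

static struct kmem_cache *foo_cachep;

static int __init foo_cache_init(void)
{
	struct kmem_cache_args args = {
		.align		= __alignof__(struct foo),
		.useroffset	= offsetof(struct foo, user_data),
		.usersize	= sizeof_field(struct foo, user_data),
	};

	/*
	 * Stands in for the old kmem_cache_create_usercopy() call; the
	 * usercopy window and alignment now come from kmem_cache_args.
	 */
	foo_cachep = __kmem_cache_create_args("foo_cache", sizeof(struct foo),
					      &args, SLAB_HWCACHE_ALIGN);
	return foo_cachep ? 0 : -ENOMEM;
}

As before, a bad window still fails closed: if usersize/useroffset do not fit inside object_size, __kmem_cache_create_args() warns and zeroes the hardened-usercopy fields rather than creating an oversized window.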
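The slub.c side adds the other new capability: a SLAB_TYPESAFE_BY_RCU cache can now donate a slot inside the object for the freelist pointer instead of having calculate_sizes() append one after it, and create_cache() rejects the request unless the offset lies inside the object, is sizeof(freeptr_t)-aligned and the cache really is SLAB_TYPESAFE_BY_RCU. A hedged sketch of such a caller follows, assuming the freeptr_t typedef removed from mm/slub.c above is now visible to cache owners through a shared header; struct bar and bar_cache_init() are illustrative only.

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/types.h>

/*
 * Illustrative SLAB_TYPESAFE_BY_RCU object that reserves an internal slot
 * for SLUB's freelist pointer while the object sits on a freelist.
 */
struct bar {
	struct rcu_head rcu;
	unsigned long state;
	freeptr_t freelist_slot;	/* clobbered while the object is free */
};

static struct kmem_cache *bar_cachep;

static int __init bar_cache_init(void)
{
	struct kmem_cache_args args = {
		/* Must stay inside the object and be freeptr_t-aligned. */
		.use_freeptr_offset	= true,
		.freeptr_offset		= offsetof(struct bar, freelist_slot),
	};

	/* create_cache() refuses the offset unless SLAB_TYPESAFE_BY_RCU is set. */
	bar_cachep = __kmem_cache_create_args("bar_cache", sizeof(struct bar),
					      &args, SLAB_TYPESAFE_BY_RCU);
	return bar_cachep ? 0 : -ENOMEM;
}

The payoff is visible in the calculate_sizes() hunks: such caches no longer take the branch that grows every object by sizeof(void *) to park the freelist pointer after it, at the cost of the named field being overwritten whenever the object is on a freelist.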