Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  160
1 file changed, 81 insertions(+), 79 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 81cea762d094..3d454888e921 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -466,12 +466,6 @@ static struct workqueue_struct *flushwq;
*******************************************************************/
/*
- * freeptr_t represents a SLUB freelist pointer, which might be encoded
- * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
- */
-typedef struct { unsigned long v; } freeptr_t;
-
-/*
* Returns freelist pointer (ptr). With hardening, this is obfuscated
* with an XOR of the address where the pointer is held and a per-cache
* random number.
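
For readers unfamiliar with the hardening, here is a minimal user-space sketch of the XOR scheme the comment describes. It is purely illustrative and not the kernel implementation (which also mixes in a byte-swapped copy of the slot address); it only shows that the obfuscation round-trips.

    /*
     * Hedged sketch: obfuscate a freelist pointer by XORing it with a
     * per-cache secret and the address of the slot holding it.
     */
    #include <stdio.h>

    typedef struct { unsigned long v; } freeptr_t;

    /* Encode: XOR the pointer with the per-cache secret and the slot address. */
    static freeptr_t encode_freeptr(unsigned long cache_random, void *ptr,
                                    void *slot_addr)
    {
        return (freeptr_t){ .v = (unsigned long)ptr ^ cache_random ^
                                 (unsigned long)slot_addr };
    }

    /* Decode: applying the same XOR recovers the original pointer. */
    static void *decode_freeptr(unsigned long cache_random, freeptr_t enc,
                                void *slot_addr)
    {
        return (void *)(enc.v ^ cache_random ^ (unsigned long)slot_addr);
    }

    int main(void)
    {
        unsigned long cache_random = 0xdeadbeefcafef00dUL; /* stands in for s->random */
        int object;
        freeptr_t slot = encode_freeptr(cache_random, &object, &slot);

        printf("round-trip ok: %d\n",
               decode_freeptr(cache_random, slot, &slot) == (void *)&object);
        return 0;
    }
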
@@ -4019,6 +4013,8 @@ static void *__slab_alloc_node(struct kmem_cache *s,
/*
* If the object has been wiped upon free, make sure it's fully initialized by
* zeroing out freelist pointer.
+ *
+ * Note that we also wipe custom freelist pointers.
*/
static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
void *obj)
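
The function body is not part of this hunk. As a hedged sketch of the idea (illustrative user-space code, assumed names): when init-on-free has already wiped the object, the slot inside the object that holds the freelist pointer, which after this series may sit at a caller-supplied freeptr_offset, is re-zeroed on allocation so the caller sees fully initialised memory.

    #include <stdio.h>
    #include <string.h>

    struct fake_cache {
        size_t offset;          /* where the freelist pointer lives in the object */
        int want_init_on_free;
    };

    /* Re-zero the in-object freelist slot so the whole object is zeroed. */
    static void wipe_obj_freeptr(const struct fake_cache *s, void *obj)
    {
        if (s->want_init_on_free && obj)
            memset((char *)obj + s->offset, 0, sizeof(void *));
    }

    int main(void)
    {
        struct fake_cache s = { .offset = 8, .want_init_on_free = 1 };
        unsigned char obj[32];

        memset(obj, 0xaa, sizeof(obj)); /* pretend a stale encoded pointer is stored here */
        wipe_obj_freeptr(&s, obj);
        printf("slot zeroed: %d\n",
               obj[8] == 0 && obj[8 + sizeof(void *) - 1] == 0);
        return 0;
    }
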
@@ -5282,7 +5278,7 @@ static void set_cpu_partial(struct kmem_cache *s)
* calculate_sizes() determines the order and the distribution of data within
* a slab object.
*/
-static int calculate_sizes(struct kmem_cache *s)
+static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s)
{
slab_flags_t flags = s->flags;
unsigned int size = s->object_size;
@@ -5323,7 +5319,8 @@ static int calculate_sizes(struct kmem_cache *s)
*/
s->inuse = size;
- if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) || s->ctor ||
+ if (((flags & SLAB_TYPESAFE_BY_RCU) && !args->use_freeptr_offset) ||
+ (flags & SLAB_POISON) || s->ctor ||
((flags & SLAB_RED_ZONE) &&
(s->object_size < sizeof(void *) || slub_debug_orig_size(s)))) {
/*
@@ -5344,6 +5341,8 @@ static int calculate_sizes(struct kmem_cache *s)
*/
s->offset = size;
size += sizeof(void *);
+ } else if ((flags & SLAB_TYPESAFE_BY_RCU) && args->use_freeptr_offset) {
+ s->offset = args->freeptr_offset;
} else {
/*
* Store freelist pointer near middle of object to keep
@@ -5418,65 +5417,6 @@ static int calculate_sizes(struct kmem_cache *s)
return !!oo_objects(s->oo);
}
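
The new branch in calculate_sizes() lets a SLAB_TYPESAFE_BY_RCU cache donate a field inside the object for the freelist pointer instead of having SLUB append one after the object. A hedged example of the intended caller-side usage follows; struct foo and the exact kmem_cache_create() calling convention taking a struct kmem_cache_args pointer are assumptions based on this series, not shown in this diff.

    /*
     * Illustrative only: an RCU-typesafe cache that provides its own
     * freelist-pointer slot via args->freeptr_offset, so calculate_sizes()
     * does not have to grow the object.
     */
    #include <linux/slab.h>

    struct foo {
        struct rcu_head rcu;
        unsigned long state;
        void *freeptr;  /* only touched by SLUB between free and reuse */
    };

    static struct kmem_cache *foo_cache;

    static int __init foo_cache_init(void)
    {
        struct kmem_cache_args args = {
            .freeptr_offset     = offsetof(struct foo, freeptr),
            .use_freeptr_offset = true,
        };

        foo_cache = kmem_cache_create("foo", sizeof(struct foo), &args,
                                      SLAB_TYPESAFE_BY_RCU);
        return foo_cache ? 0 : -ENOMEM;
    }
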
-static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
-{
- s->flags = kmem_cache_flags(flags, s->name);
-#ifdef CONFIG_SLAB_FREELIST_HARDENED
- s->random = get_random_long();
-#endif
-
- if (!calculate_sizes(s))
- goto error;
- if (disable_higher_order_debug) {
- /*
- * Disable debugging flags that store metadata if the min slab
- * order increased.
- */
- if (get_order(s->size) > get_order(s->object_size)) {
- s->flags &= ~DEBUG_METADATA_FLAGS;
- s->offset = 0;
- if (!calculate_sizes(s))
- goto error;
- }
- }
-
-#ifdef system_has_freelist_aba
- if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
- /* Enable fast mode */
- s->flags |= __CMPXCHG_DOUBLE;
- }
-#endif
-
- /*
- * The larger the object size is, the more slabs we want on the partial
- * list to avoid pounding the page allocator excessively.
- */
- s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
- s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
-
- set_cpu_partial(s);
-
-#ifdef CONFIG_NUMA
- s->remote_node_defrag_ratio = 1000;
-#endif
-
- /* Initialize the pre-computed randomized freelist if slab is up */
- if (slab_state >= UP) {
- if (init_cache_random_seq(s))
- goto error;
- }
-
- if (!init_kmem_cache_nodes(s))
- goto error;
-
- if (alloc_kmem_cache_cpus(s))
- return 0;
-
-error:
- __kmem_cache_release(s);
- return -EINVAL;
-}
-
static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
const char *text)
{
@@ -6030,28 +5970,90 @@ __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
return s;
}
-int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
+int do_kmem_cache_create(struct kmem_cache *s, const char *name,
+ unsigned int size, struct kmem_cache_args *args,
+ slab_flags_t flags)
{
- int err;
+ int err = -EINVAL;
- err = kmem_cache_open(s, flags);
- if (err)
- return err;
+ s->name = name;
+ s->size = s->object_size = size;
+
+ s->flags = kmem_cache_flags(flags, s->name);
+#ifdef CONFIG_SLAB_FREELIST_HARDENED
+ s->random = get_random_long();
+#endif
+ s->align = args->align;
+ s->ctor = args->ctor;
+#ifdef CONFIG_HARDENED_USERCOPY
+ s->useroffset = args->useroffset;
+ s->usersize = args->usersize;
+#endif
+
+ if (!calculate_sizes(args, s))
+ goto out;
+ if (disable_higher_order_debug) {
+ /*
+ * Disable debugging flags that store metadata if the min slab
+ * order increased.
+ */
+ if (get_order(s->size) > get_order(s->object_size)) {
+ s->flags &= ~DEBUG_METADATA_FLAGS;
+ s->offset = 0;
+ if (!calculate_sizes(args, s))
+ goto out;
+ }
+ }
+
+#ifdef system_has_freelist_aba
+ if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
+ /* Enable fast mode */
+ s->flags |= __CMPXCHG_DOUBLE;
+ }
+#endif
+
+ /*
+ * The larger the object size is, the more slabs we want on the partial
+ * list to avoid pounding the page allocator excessively.
+ */
+ s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
+ s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
+
+ set_cpu_partial(s);
+
+#ifdef CONFIG_NUMA
+ s->remote_node_defrag_ratio = 1000;
+#endif
+
+ /* Initialize the pre-computed randomized freelist if slab is up */
+ if (slab_state >= UP) {
+ if (init_cache_random_seq(s))
+ goto out;
+ }
+
+ if (!init_kmem_cache_nodes(s))
+ goto out;
+
+ if (!alloc_kmem_cache_cpus(s))
+ goto out;
/* Mutex is not taken during early boot */
- if (slab_state <= UP)
- return 0;
+ if (slab_state <= UP) {
+ err = 0;
+ goto out;
+ }
err = sysfs_slab_add(s);
- if (err) {
- __kmem_cache_release(s);
- return err;
- }
+ if (err)
+ goto out;
if (s->flags & SLAB_STORE_USER)
debugfs_slab_add(s);
- return 0;
+out:
+ if (err)
+ __kmem_cache_release(s);
+ return err;
}
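
With kmem_cache_open() folded in, do_kmem_cache_create() now takes the name, size and args directly and funnels every failure through the single out: label, where __kmem_cache_release() tears down whatever was already set up. A hedged sketch of what a caller in mm/slab_common.c might look like after this series (names and details assumed, not shown in this diff):

    /*
     * Hedged sketch of a caller, not part of this patch: allocate the
     * struct kmem_cache, hand name/size/args/flags to do_kmem_cache_create(),
     * and register the cache only on success.
     */
    static struct kmem_cache *create_cache(const char *name, unsigned int size,
                                           struct kmem_cache_args *args,
                                           slab_flags_t flags)
    {
        struct kmem_cache *s;
        int err = -ENOMEM;

        s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
        if (!s)
            goto out;

        err = do_kmem_cache_create(s, name, size, args, flags);
        if (err)
            goto out_free_cache;

        s->refcount = 1;
        list_add(&s->list, &slab_caches);
        return s;

    out_free_cache:
        kmem_cache_free(kmem_cache, s);
    out:
        return ERR_PTR(err);
    }
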
#ifdef SLAB_SUPPORTS_SYSFS