|           |                                                          |                           |
|-----------|----------------------------------------------------------|---------------------------|
| author    | Vlastimil Babka <[email protected]>                      | 2022-11-10 09:44:07 +0100 |
| committer | Vlastimil Babka <[email protected]>                      | 2022-11-21 10:35:37 +0100 |
| commit    | 4b28ba9eeab4345af43e45e6eb4056eb2f1cb764 (patch)         |                           |
| tree      | d40099d9c163394f8dd7ded45d78a28f3610b0ec /include/linux  |                           |
| parent    | c18c20f16219516b12a4f2fd29c25e06be97e064 (diff)          |                           |
| parent    | 838de63b101147fc7d8af828465cf6d1d30232a8 (diff)          |                           |
Merge branch 'slab/for-6.2/cleanups' into slab/for-next
- Removal of dead code from deactivate_slab() by Hyeonggon Yoo.
- Fix of BUILD_BUG_ON() for sufficient early percpu size by Baoquan He (a sketch of the compile-time check pattern follows this list).
- Make kmem_cache_alloc() kernel-doc less misleading, by myself.
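The second item relies on the kernel's BUILD_BUG_ON() pattern: a condition that must hold is checked at compile time, so an undersized early per-CPU area fails the build instead of corrupting memory at boot. Below is a minimal, userspace-compilable sketch of that pattern; the `BUILD_BUG_ON()` definition mirrors the kernel's negative-width-bitfield trick, while `EARLY_NEED`, `KMALLOC_CACHES`, and the `kmem_cache_cpu` layout are illustrative stand-ins, not the exact expression from mm/slub.c.

```c
#include <stddef.h>

/*
 * Same negative-width-bitfield trick as the kernel's BUILD_BUG_ON_ZERO():
 * if cond is true, the bitfield width is -1 and compilation fails.
 */
#define BUILD_BUG_ON(cond) ((void)sizeof(struct { int : -!!(cond); }))

#define PERCPU_DYNAMIC_EARLY_SIZE (20 << 10)	/* 20 KiB after the fix below */

/* Illustrative stand-in for the real per-CPU slab bookkeeping. */
struct kmem_cache_cpu {
	void **freelist;
	unsigned long tid;
	void *slab;
};

#define KMALLOC_CACHES 64	/* hypothetical number of early caches */
#define EARLY_NEED (KMALLOC_CACHES * sizeof(struct kmem_cache_cpu))

int main(void)
{
	/* Builds only while the early dynamic area covers the need. */
	BUILD_BUG_ON(EARLY_NEED > PERCPU_DYNAMIC_EARLY_SIZE);
	return 0;
}
```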
Diffstat (limited to 'include/linux')
| mode       | path                   | lines changed |
|------------|------------------------|---------------|
| -rw-r--r-- | include/linux/percpu.h | 2             |
| -rw-r--r-- | include/linux/slab.h   | 23            |

2 files changed, 18 insertions(+), 7 deletions(-)
```diff
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index f1ec5ad1351c..3dbb6fb70658 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -42,7 +42,7 @@
  * larger than PERCPU_DYNAMIC_EARLY_SIZE.
  */
 #define PERCPU_DYNAMIC_EARLY_SLOTS 128
-#define PERCPU_DYNAMIC_EARLY_SIZE (12 << 10)
+#define PERCPU_DYNAMIC_EARLY_SIZE (20 << 10)
 
 /*
  * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 45efc6c553b8..067f0e80be9e 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -441,7 +441,18 @@ static_assert(PAGE_SHIFT <= 20);
 #endif /* !CONFIG_SLOB */
 
 void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
-void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_slab_alignment __malloc;
+
+/**
+ * kmem_cache_alloc - Allocate an object
+ * @cachep: The cache to allocate from.
+ * @flags: See kmalloc().
+ *
+ * Allocate an object from this cache.
+ * See kmem_cache_zalloc() for a shortcut of adding __GFP_ZERO to flags.
+ *
+ * Return: pointer to the new object or %NULL in case of error
+ */
+void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) __assume_slab_alignment __malloc;
 void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru, gfp_t gfpflags)
 			   __assume_slab_alignment __malloc;
 void kmem_cache_free(struct kmem_cache *s, void *objp);
@@ -483,9 +494,9 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_align
 	__alloc_size(1);
 
 /**
- * kmalloc - allocate memory
+ * kmalloc - allocate kernel memory
  * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
+ * @flags: describe the allocation context
  *
  * kmalloc is the normal method of allocating memory
  * for objects smaller than page size in the kernel.
@@ -512,12 +523,12 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_align
  * %GFP_ATOMIC
  *	Allocation will not sleep. May use emergency pools.
  *
- * %GFP_HIGHUSER
- *	Allocate memory from high memory on behalf of user.
- *
  * Also it is possible to set different flags by OR'ing
  * in one or more of the following additional @flags:
  *
+ * %__GFP_ZERO
+ *	Zero the allocated memory before returning. Also see kzalloc().
+ *
  * %__GFP_HIGH
  *	This allocation has high priority and may use emergency pools.
  *
```
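To make the slab.h declarations above concrete, here is a hedged usage sketch. The `widget` cache and helper names are hypothetical; the calls themselves (kmem_cache_create(), kmem_cache_zalloc(), kmem_cache_free()) are the long-standing slab API whose kernel-doc this merge adjusts, including the %__GFP_ZERO shortcut now listed under kmalloc().

```c
#include <linux/errno.h>
#include <linux/slab.h>

/* Hypothetical object type; the slab calls are the real API. */
struct widget {
	int id;
	char name[16];
};

static struct kmem_cache *widget_cache;

static int widget_cache_init(void)
{
	widget_cache = kmem_cache_create("widget", sizeof(struct widget),
					 0, SLAB_HWCACHE_ALIGN, NULL);
	return widget_cache ? 0 : -ENOMEM;
}

static struct widget *widget_new(gfp_t flags)
{
	/*
	 * kmem_cache_zalloc() is the documented shortcut for
	 * kmem_cache_alloc(widget_cache, flags | __GFP_ZERO).
	 * Per the new kernel-doc, it returns %NULL on error.
	 */
	return kmem_cache_zalloc(widget_cache, flags);
}

static void widget_free(struct widget *w)
{
	kmem_cache_free(widget_cache, w);
}
```

A caller would typically do `struct widget *w = widget_new(GFP_KERNEL);` and check for %NULL before use.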