Diffstat (limited to 'include/linux/slab.h')
 -rw-r--r--  include/linux/slab.h | 189
 1 file changed, 105 insertions(+), 84 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 739b21262507..7247e217e21b 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -56,6 +56,9 @@ enum _slab_flag_bits {
 #endif
 	_SLAB_OBJECT_POISON,
 	_SLAB_CMPXCHG_DOUBLE,
+#ifdef CONFIG_SLAB_OBJ_EXT
+	_SLAB_NO_OBJ_EXT,
+#endif
 	_SLAB_FLAGS_LAST_BIT
 };
 
@@ -202,6 +205,13 @@ enum _slab_flag_bits {
 #endif
 #define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
 
+/* Slab created using create_boot_cache */
+#ifdef CONFIG_SLAB_OBJ_EXT
+#define SLAB_NO_OBJ_EXT		__SLAB_FLAG_BIT(_SLAB_NO_OBJ_EXT)
+#else
+#define SLAB_NO_OBJ_EXT		__SLAB_FLAG_UNUSED
+#endif
+
 /*
  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
  *
@@ -261,7 +271,10 @@ int kmem_cache_shrink(struct kmem_cache *s);
 /*
  * Common kmalloc functions provided by all allocators
  */
-void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __realloc_size(2);
+void * __must_check krealloc_noprof(const void *objp, size_t new_size,
+				    gfp_t flags) __realloc_size(2);
+#define krealloc(...)				alloc_hooks(krealloc_noprof(__VA_ARGS__))
+
 void kfree(const void *objp);
 void kfree_sensitive(const void *objp);
 size_t __ksize(const void *objp);
@@ -513,7 +526,10 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
 static_assert(PAGE_SHIFT <= 20);
 #define kmalloc_index(s) __kmalloc_index(s, true)
 
-void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
+#include <linux/alloc_tag.h>
+
+void *__kmalloc_noprof(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
+#define __kmalloc(...)				alloc_hooks(__kmalloc_noprof(__VA_ARGS__))
 
 /**
  * kmem_cache_alloc - Allocate an object
@@ -525,9 +541,14 @@ void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
  *
  * Return: pointer to the new object or %NULL in case of error
  */
-void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) __assume_slab_alignment __malloc;
-void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
-			   gfp_t gfpflags) __assume_slab_alignment __malloc;
+void *kmem_cache_alloc_noprof(struct kmem_cache *cachep,
+			      gfp_t flags) __assume_slab_alignment __malloc;
+#define kmem_cache_alloc(...)			alloc_hooks(kmem_cache_alloc_noprof(__VA_ARGS__))
+
+void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
+			    gfp_t gfpflags) __assume_slab_alignment __malloc;
+#define kmem_cache_alloc_lru(...)	alloc_hooks(kmem_cache_alloc_lru_noprof(__VA_ARGS__))
+
 void kmem_cache_free(struct kmem_cache *s, void *objp);
 
 /*
@@ -538,29 +559,40 @@ void kmem_cache_free(struct kmem_cache *s, void *objp);
  * Note that interrupts must be enabled when calling these functions.
  */
 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
-int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
+
+int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
+#define kmem_cache_alloc_bulk(...)	alloc_hooks(kmem_cache_alloc_bulk_noprof(__VA_ARGS__))
 
 static __always_inline void kfree_bulk(size_t size, void **p)
 {
 	kmem_cache_free_bulk(NULL, size, p);
 }
 
-void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
+void *__kmalloc_node_noprof(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
 							 __alloc_size(1);
-void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
-									 __malloc;
+#define __kmalloc_node(...)			alloc_hooks(__kmalloc_node_noprof(__VA_ARGS__))
 
-void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
+void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags,
+				   int node) __assume_slab_alignment __malloc;
+#define kmem_cache_alloc_node(...)	alloc_hooks(kmem_cache_alloc_node_noprof(__VA_ARGS__))
+
+void *kmalloc_trace_noprof(struct kmem_cache *s, gfp_t flags, size_t size)
 		    __assume_kmalloc_alignment __alloc_size(3);
 
-void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
-			 int node, size_t size) __assume_kmalloc_alignment
+void *kmalloc_node_trace_noprof(struct kmem_cache *s, gfp_t gfpflags,
+		int node, size_t size) __assume_kmalloc_alignment
 						__alloc_size(4);
-void *kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment
+#define kmalloc_trace(...)			alloc_hooks(kmalloc_trace_noprof(__VA_ARGS__))
+
+#define kmalloc_node_trace(...)			alloc_hooks(kmalloc_node_trace_noprof(__VA_ARGS__))
+
+void *kmalloc_large_noprof(size_t size, gfp_t flags) __assume_page_alignment
 					      __alloc_size(1);
+#define kmalloc_large(...)			alloc_hooks(kmalloc_large_noprof(__VA_ARGS__))
 
-void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_alignment
+void *kmalloc_large_node_noprof(size_t size, gfp_t flags, int node) __assume_page_alignment
 							     __alloc_size(1);
+#define kmalloc_large_node(...)			alloc_hooks(kmalloc_large_node_noprof(__VA_ARGS__))
 
 /**
  * kmalloc - allocate kernel memory
@@ -616,37 +648,39 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_alignment
  *	Try really hard to succeed the allocation but fail
  *	eventually.
  */
-static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
+static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size) && size) {
 		unsigned int index;
 
 		if (size > KMALLOC_MAX_CACHE_SIZE)
-			return kmalloc_large(size, flags);
+			return kmalloc_large_noprof(size, flags);
 
 		index = kmalloc_index(size);
-		return kmalloc_trace(
+		return kmalloc_trace_noprof(
 				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
 				flags, size);
 	}
-	return __kmalloc(size, flags);
+	return __kmalloc_noprof(size, flags);
 }
+#define kmalloc(...)				alloc_hooks(kmalloc_noprof(__VA_ARGS__))
 
-static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline __alloc_size(1) void *kmalloc_node_noprof(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size) && size) {
 		unsigned int index;
 
 		if (size > KMALLOC_MAX_CACHE_SIZE)
-			return kmalloc_large_node(size, flags, node);
+			return kmalloc_large_node_noprof(size, flags, node);
 
 		index = kmalloc_index(size);
-		return kmalloc_node_trace(
+		return kmalloc_node_trace_noprof(
 				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
 				flags, node, size);
 	}
-	return __kmalloc_node(size, flags, node);
+	return __kmalloc_node_noprof(size, flags, node);
 }
+#define kmalloc_node(...)			alloc_hooks(kmalloc_node_noprof(__VA_ARGS__))
 
 /**
  * kmalloc_array - allocate memory for an array.
@@ -654,16 +688,17 @@ static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
  * @size: element size.
  * @flags: the type of memory to allocate (see kmalloc).
  */
-static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_t flags)
+static inline __alloc_size(1, 2) void *kmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
 {
 	size_t bytes;
 
 	if (unlikely(check_mul_overflow(n, size, &bytes)))
 		return NULL;
 	if (__builtin_constant_p(n) && __builtin_constant_p(size))
-		return kmalloc(bytes, flags);
-	return __kmalloc(bytes, flags);
+		return kmalloc_noprof(bytes, flags);
+	return kmalloc_noprof(bytes, flags);
 }
+#define kmalloc_array(...)			alloc_hooks(kmalloc_array_noprof(__VA_ARGS__))
 
 /**
  * krealloc_array - reallocate memory for an array.
@@ -672,18 +707,19 @@ static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_t flags)
  * @new_size: new size of a single member of the array
  * @flags: the type of memory to allocate (see kmalloc)
  */
-static inline __realloc_size(2, 3) void * __must_check krealloc_array(void *p,
-								      size_t new_n,
-								      size_t new_size,
-								      gfp_t flags)
+static inline __realloc_size(2, 3) void * __must_check krealloc_array_noprof(void *p,
+								       size_t new_n,
+								       size_t new_size,
+								       gfp_t flags)
 {
 	size_t bytes;
 
 	if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
 		return NULL;
 
-	return krealloc(p, bytes, flags);
+	return krealloc_noprof(p, bytes, flags);
 }
+#define krealloc_array(...)			alloc_hooks(krealloc_array_noprof(__VA_ARGS__))
 
 /**
  * kcalloc - allocate memory for an array. The memory is set to zero.
@@ -691,16 +727,12 @@ static inline __realloc_size(2, 3) void * __must_check krealloc_array(void *p,
  * @size: element size.
  * @flags: the type of memory to allocate (see kmalloc).
  */
-static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flags)
-{
-	return kmalloc_array(n, size, flags | __GFP_ZERO);
-}
+#define kcalloc(n, size, flags)		kmalloc_array(n, size, (flags) | __GFP_ZERO)
 
-void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
+void *kmalloc_node_track_caller_noprof(size_t size, gfp_t flags, int node,
 				  unsigned long caller) __alloc_size(1);
-#define kmalloc_node_track_caller(size, flags, node) \
-	__kmalloc_node_track_caller(size, flags, node, \
-				    _RET_IP_)
+#define kmalloc_node_track_caller(...)		\
+	alloc_hooks(kmalloc_node_track_caller_noprof(__VA_ARGS__, _RET_IP_))
 
 /*
  * kmalloc_track_caller is a special version of kmalloc that records the
@@ -710,11 +742,12 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
  * allocator where we care about the real place the memory allocation
  * request comes from.
  */
-#define kmalloc_track_caller(size, flags) \
-	__kmalloc_node_track_caller(size, flags, \
-				    NUMA_NO_NODE, _RET_IP_)
+#define kmalloc_track_caller(...)		kmalloc_node_track_caller(__VA_ARGS__, NUMA_NO_NODE)
 
-static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
+#define kmalloc_track_caller_noprof(...)	\
+		kmalloc_node_track_caller_noprof(__VA_ARGS__, NUMA_NO_NODE, _RET_IP_)
+
+static inline __alloc_size(1, 2) void *kmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags,
 							  int node)
 {
 	size_t bytes;
@@ -722,75 +755,63 @@ static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
 	if (unlikely(check_mul_overflow(n, size, &bytes)))
 		return NULL;
 	if (__builtin_constant_p(n) && __builtin_constant_p(size))
-		return kmalloc_node(bytes, flags, node);
-	return __kmalloc_node(bytes, flags, node);
+		return kmalloc_node_noprof(bytes, flags, node);
+	return __kmalloc_node_noprof(bytes, flags, node);
 }
+#define kmalloc_array_node(...)			alloc_hooks(kmalloc_array_node_noprof(__VA_ARGS__))
 
-static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
-{
-	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
-}
+#define kcalloc_node(_n, _size, _flags, _node)	\
+	kmalloc_array_node(_n, _size, (_flags) | __GFP_ZERO, _node)
 
 /*
  * Shortcuts
  */
-static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
-{
-	return kmem_cache_alloc(k, flags | __GFP_ZERO);
-}
+#define kmem_cache_zalloc(_k, _flags)		kmem_cache_alloc(_k, (_flags)|__GFP_ZERO)
 
 /**
  * kzalloc - allocate memory. The memory is set to zero.
  * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
  */
-static inline __alloc_size(1) void *kzalloc(size_t size, gfp_t flags)
+static inline __alloc_size(1) void *kzalloc_noprof(size_t size, gfp_t flags)
 {
-	return kmalloc(size, flags | __GFP_ZERO);
+	return kmalloc_noprof(size, flags | __GFP_ZERO);
 }
+#define kzalloc(...)				alloc_hooks(kzalloc_noprof(__VA_ARGS__))
+#define kzalloc_node(_size, _flags, _node)	kmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
 
-/**
- * kzalloc_node - allocate zeroed memory from a particular memory node.
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kmalloc).
- * @node: memory node from which to allocate
- */
-static inline __alloc_size(1) void *kzalloc_node(size_t size, gfp_t flags, int node)
-{
-	return kmalloc_node(size, flags | __GFP_ZERO, node);
-}
+extern void *kvmalloc_node_noprof(size_t size, gfp_t flags, int node) __alloc_size(1);
+#define kvmalloc_node(...)			alloc_hooks(kvmalloc_node_noprof(__VA_ARGS__))
 
-extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
-static inline __alloc_size(1) void *kvmalloc(size_t size, gfp_t flags)
-{
-	return kvmalloc_node(size, flags, NUMA_NO_NODE);
-}
-static inline __alloc_size(1) void *kvzalloc_node(size_t size, gfp_t flags, int node)
-{
-	return kvmalloc_node(size, flags | __GFP_ZERO, node);
-}
-static inline __alloc_size(1) void *kvzalloc(size_t size, gfp_t flags)
-{
-	return kvmalloc(size, flags | __GFP_ZERO);
-}
+#define kvmalloc(_size, _flags)			kvmalloc_node(_size, _flags, NUMA_NO_NODE)
+#define kvmalloc_noprof(_size, _flags)		kvmalloc_node_noprof(_size, _flags, NUMA_NO_NODE)
+#define kvzalloc(_size, _flags)			kvmalloc(_size, (_flags)|__GFP_ZERO)
+
+#define kvzalloc_node(_size, _flags, _node)	kvmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
 
-static inline __alloc_size(1, 2) void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
+static inline __alloc_size(1, 2) void *
+kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node)
 {
 	size_t bytes;
 
 	if (unlikely(check_mul_overflow(n, size, &bytes)))
 		return NULL;
 
-	return kvmalloc(bytes, flags);
+	return kvmalloc_node_noprof(bytes, flags, node);
 }
 
-static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t flags)
-{
-	return kvmalloc_array(n, size, flags | __GFP_ZERO);
-}
+#define kvmalloc_array_noprof(...)		kvmalloc_array_node_noprof(__VA_ARGS__, NUMA_NO_NODE)
+#define kvcalloc_node_noprof(_n,_s,_f,_node)	kvmalloc_array_node_noprof(_n,_s,(_f)|__GFP_ZERO,_node)
+#define kvcalloc_noprof(...)			kvcalloc_node_noprof(__VA_ARGS__, NUMA_NO_NODE)
 
-extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
+#define kvmalloc_array(...)			alloc_hooks(kvmalloc_array_noprof(__VA_ARGS__))
+#define kvcalloc_node(...)			alloc_hooks(kvcalloc_node_noprof(__VA_ARGS__))
+#define kvcalloc(...)				alloc_hooks(kvcalloc_noprof(__VA_ARGS__))
+
+extern void *kvrealloc_noprof(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
 		      __realloc_size(3);
+#define kvrealloc(...)				alloc_hooks(kvrealloc_noprof(__VA_ARGS__))
+
 extern void kvfree(const void *addr);
 DEFINE_FREE(kvfree, void *, if (!IS_ERR_OR_NULL(_T)) kvfree(_T))
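The conversion above is mechanical: every allocator keeps its real implementation under a _noprof name, and the public name becomes a variadic macro that routes the call through alloc_hooks() so the allocation can be attributed to its call site. Below is a minimal userspace sketch of that shape (GNU C statement expressions, as the kernel uses); the struct alloc_tag layout and the printf() in the hook are simplified stand-ins, not the real machinery behind <linux/alloc_tag.h>.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for the kernel's per-callsite allocation tag. */
struct alloc_tag {
	const char *file;
	int line;
};

/*
 * Simplified stand-in for alloc_hooks(): record the call site in a
 * static tag, then evaluate the wrapped allocation. The kernel hook
 * charges the allocation against the tag instead of printing.
 */
#define alloc_hooks(_do_alloc)					\
({								\
	static struct alloc_tag _tag = { __FILE__, __LINE__ };	\
	printf("alloc from %s:%d\n", _tag.file, _tag.line);	\
	_do_alloc;						\
})

/* The profiling-free path, analogous to kmalloc_noprof(). */
static void *my_alloc_noprof(size_t size)
{
	return malloc(size);
}

/* The public name becomes a macro, mirroring the kmalloc() change. */
#define my_alloc(...)	alloc_hooks(my_alloc_noprof(__VA_ARGS__))

int main(void)
{
	void *p = my_alloc(64);	/* tagged with this file and line */

	free(p);
	return 0;
}

Because the macro expands at the call site, each user gets its own static tag, which is how kmalloc(), krealloc() and the rest keep their existing call signatures while gaining per-site accounting.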
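A second detail: wrappers such as kmalloc_track_caller() switch from named parameters to (...) so that each macro layer can forward the caller's arguments unchanged and append fixed trailing ones (NUMA_NO_NODE at one layer, _RET_IP_ at the next). A small standalone demo of that argument-appending idiom, with hypothetical alloc() and alloc_node() names standing in for the kernel wrappers:

#include <stdio.h>

/* Stand-ins for the kernel constants; the macro shape is the point. */
#define NUMA_NO_NODE	(-1)
#define _RET_IP_	((unsigned long)__builtin_return_address(0))

static void *alloc_node_caller(size_t size, int node, unsigned long caller)
{
	printf("size=%zu node=%d caller=%#lx\n", size, node, caller);
	return NULL;
}

/* Each layer forwards __VA_ARGS__ and appends one fixed argument. */
#define alloc_node(...)	alloc_node_caller(__VA_ARGS__, _RET_IP_)
#define alloc(...)	alloc_node(__VA_ARGS__, NUMA_NO_NODE)

int main(void)
{
	alloc(128);		/* alloc_node_caller(128, NUMA_NO_NODE, _RET_IP_) */
	alloc_node(64, 0);	/* alloc_node_caller(64, 0, _RET_IP_) */
	return 0;
}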