Diffstat (limited to 'mm/util.c')
-rw-r--r--  mm/util.c  55
1 file changed, 44 insertions, 11 deletions
diff --git a/mm/util.c b/mm/util.c
index 6c3e6710e4de..bd283e2132e0 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -26,6 +26,8 @@
 #include <linux/uaccess.h>
 
+#include <kunit/visibility.h>
+
 #include "internal.h"
 #include "swap.h"
 
@@ -139,14 +141,14 @@ EXPORT_SYMBOL(kmemdup_noprof);
  * kmemdup_array - duplicate a given array.
  *
  * @src: array to duplicate.
- * @element_size: size of each element of array.
  * @count: number of elements to duplicate from array.
+ * @element_size: size of each element of array.
  * @gfp: GFP mask to use.
  *
  * Return: duplicated array of @src or %NULL in case of error,
  * result is physically contiguous. Use kfree() to free.
  */
-void *kmemdup_array(const void *src, size_t element_size, size_t count, gfp_t gfp)
+void *kmemdup_array(const void *src, size_t count, size_t element_size, gfp_t gfp)
 {
 	return kmemdup(src, size_mul(element_size, count), gfp);
 }
@@ -198,6 +200,16 @@ char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
 }
 EXPORT_SYMBOL(kmemdup_nul);
 
+static kmem_buckets *user_buckets __ro_after_init;
+
+static int __init init_user_buckets(void)
+{
+	user_buckets = kmem_buckets_create("memdup_user", 0, 0, INT_MAX, NULL);
+
+	return 0;
+}
+subsys_initcall(init_user_buckets);
+
 /**
  * memdup_user - duplicate memory region from user space
  *
@@ -211,7 +223,7 @@ void *memdup_user(const void __user *src, size_t len)
 {
 	void *p;
 
-	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
+	p = kmem_buckets_alloc_track_caller(user_buckets, len, GFP_USER | __GFP_NOWARN);
 	if (!p)
 		return ERR_PTR(-ENOMEM);
 
@@ -237,7 +249,7 @@ void *vmemdup_user(const void __user *src, size_t len)
 {
 	void *p;
 
-	p = kvmalloc(len, GFP_USER);
+	p = kmem_buckets_valloc(user_buckets, len, GFP_USER);
 	if (!p)
 		return ERR_PTR(-ENOMEM);
 
@@ -482,6 +494,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
 	clear_bit(MMF_TOPDOWN, &mm->flags);
 }
 #endif
+#ifdef CONFIG_MMU
+EXPORT_SYMBOL_IF_KUNIT(arch_pick_mmap_layout);
+#endif
 
 /**
  * __account_locked_vm - account locked pages to an mm's locked_vm
@@ -594,9 +609,10 @@ unsigned long vm_mmap(struct file *file, unsigned long addr,
 EXPORT_SYMBOL(vm_mmap);
 
 /**
- * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
+ * __kvmalloc_node - attempt to allocate physically contiguous memory, but upon
  * failure, fall back to non-contiguous (vmalloc) allocation.
  * @size: size of the request.
+ * @b: which set of kmalloc buckets to allocate from.
  * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
  * @node: numa node to allocate from
  *
@@ -609,7 +625,7 @@ EXPORT_SYMBOL(vm_mmap);
  *
  * Return: pointer to the allocated memory of %NULL in case of failure
  */
-void *kvmalloc_node_noprof(size_t size, gfp_t flags, int node)
+void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
 {
 	gfp_t kmalloc_flags = flags;
 	void *ret;
@@ -631,7 +647,7 @@ void *kvmalloc_node_noprof(size_t size, gfp_t flags, int node)
 		kmalloc_flags &= ~__GFP_NOFAIL;
 	}
 
-	ret = kmalloc_node_noprof(size, kmalloc_flags, node);
+	ret = __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, b), kmalloc_flags, node);
 
 	/*
 	 * It doesn't really make sense to fallback to vmalloc for sub page
@@ -660,7 +676,7 @@ void *kvmalloc_node_noprof(size_t size, gfp_t flags, int node)
 			flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
 			node, __builtin_return_address(0));
 }
-EXPORT_SYMBOL(kvmalloc_node_noprof);
+EXPORT_SYMBOL(__kvmalloc_node_noprof);
 
 /**
  * kvfree() - Free memory.
@@ -828,6 +844,23 @@ void folio_copy(struct folio *dst, struct folio *src)
 }
 EXPORT_SYMBOL(folio_copy);
 
+int folio_mc_copy(struct folio *dst, struct folio *src)
+{
+	long nr = folio_nr_pages(src);
+	long i = 0;
+
+	for (;;) {
+		if (copy_mc_highpage(folio_page(dst, i), folio_page(src, i)))
+			return -EHWPOISON;
+		if (++i == nr)
+			break;
+		cond_resched();
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(folio_mc_copy);
+
 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
 int sysctl_overcommit_ratio __read_mostly = 50;
 unsigned long sysctl_overcommit_kbytes __read_mostly;
@@ -835,7 +868,7 @@ int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
 
-int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
+int overcommit_ratio_handler(const struct ctl_table *table, int write, void *buffer,
 		size_t *lenp, loff_t *ppos)
 {
 	int ret;
@@ -851,7 +884,7 @@ static void sync_overcommit_as(struct work_struct *dummy)
 	percpu_counter_sync(&vm_committed_as);
 }
 
-int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
+int overcommit_policy_handler(const struct ctl_table *table, int write, void *buffer,
 		size_t *lenp, loff_t *ppos)
 {
 	struct ctl_table t;
@@ -887,7 +920,7 @@ int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
 	return ret;
 }
 
-int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
+int overcommit_kbytes_handler(const struct ctl_table *table, int write, void *buffer,
 		size_t *lenp, loff_t *ppos)
 {
 	int ret;
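The kmemdup_array() hunk swaps @element_size and @count so the argument order matches kcalloc() and kmalloc_array(). A minimal sketch of a converted call site, assuming the change above (struct item and dup_items() are hypothetical, not part of this diff):

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

/* Illustrative element type; any fixed-size struct works the same way. */
struct item {
	u32 key;
	u32 val;
};

static struct item *dup_items(const struct item *src, size_t count)
{
	/* Before this change: kmemdup_array(src, sizeof(*src), count, GFP_KERNEL) */
	return kmemdup_array(src, count, sizeof(*src), GFP_KERNEL);
}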
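The memdup_user()/vmemdup_user() hunks route user-controlled-size allocations through a dedicated set of kmalloc buckets instead of the shared kmalloc caches, making heap grooming through these interfaces harder. A sketch of the same pattern applied to a hypothetical subsystem (the example_* names are illustrative; a NULL bucket set simply falls back to the normal kmalloc caches, which is why allocations are safe even before the initcall runs):

#include <linux/init.h>
#include <linux/slab.h>

/* Dedicated buckets for this subsystem's user-sized allocations. */
static kmem_buckets *example_buckets __ro_after_init;

static int __init example_buckets_init(void)
{
	example_buckets = kmem_buckets_create("example_user", 0, 0, INT_MAX, NULL);
	return 0;
}
subsys_initcall(example_buckets_init);

static void *example_alloc(size_t len)
{
	/* Allocate from the dedicated buckets rather than shared caches. */
	return kmem_buckets_alloc(example_buckets, len, GFP_KERNEL);
}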
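folio_mc_copy() is a machine-check-aware variant of folio_copy(): it copies the folio page by page via copy_mc_highpage() and returns -EHWPOISON instead of consuming an unrecoverable machine check when the source contains poisoned memory. A hypothetical caller, not taken from this diff, could look like this:

#include <linux/mm.h>
#include <linux/printk.h>

/* Illustrative wrapper: propagate poison as an error instead of crashing. */
static int try_copy_folio(struct folio *dst, struct folio *src)
{
	int ret = folio_mc_copy(dst, src);

	if (ret == -EHWPOISON)
		pr_warn("skipping copy: source folio is hw-poisoned\n");
	return ret;
}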