Diffstat (limited to 'kernel/module/main.c')

 -rw-r--r--  kernel/module/main.c | 125
 1 file changed, 65 insertions(+), 60 deletions(-)
diff --git a/kernel/module/main.c b/kernel/module/main.c
index e1e8a7a9d6c1..d18a94b973e1 100644
--- a/kernel/module/main.c
+++ b/kernel/module/main.c
@@ -56,7 +56,9 @@
 #include <linux/dynamic_debug.h>
 #include <linux/audit.h>
 #include <linux/cfi.h>
+#include <linux/codetag.h>
 #include <linux/debugfs.h>
+#include <linux/execmem.h>
 #include <uapi/linux/module.h>
 
 #include "internal.h"
@@ -1179,16 +1181,6 @@ resolve_symbol_wait(struct module *mod,
 	return ksym;
 }
 
-void __weak module_memfree(void *module_region)
-{
-	/*
-	 * This memory may be RO, and freeing RO memory in an interrupt is not
-	 * supported by vmalloc.
-	 */
-	WARN_ON(in_interrupt());
-	vfree(module_region);
-}
-
 void __weak module_arch_cleanup(struct module *mod)
 {
 }
@@ -1197,28 +1189,54 @@ void __weak module_arch_freeing_init(struct module *mod)
 {
 }
 
-static bool mod_mem_use_vmalloc(enum mod_mem_type type)
+static int module_memory_alloc(struct module *mod, enum mod_mem_type type)
 {
-	return IS_ENABLED(CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC) &&
-		mod_mem_type_is_core_data(type);
-}
+	unsigned int size = PAGE_ALIGN(mod->mem[type].size);
+	enum execmem_type execmem_type;
+	void *ptr;
 
-static void *module_memory_alloc(unsigned int size, enum mod_mem_type type)
-{
-	if (mod_mem_use_vmalloc(type))
-		return vzalloc(size);
-	return module_alloc(size);
+	mod->mem[type].size = size;
+
+	if (mod_mem_type_is_data(type))
+		execmem_type = EXECMEM_MODULE_DATA;
+	else
+		execmem_type = EXECMEM_MODULE_TEXT;
+
+	ptr = execmem_alloc(execmem_type, size);
+	if (!ptr)
+		return -ENOMEM;
+
+	/*
+	 * The pointer to these blocks of memory are stored on the module
+	 * structure and we keep that around so long as the module is
+	 * around. We only free that memory when we unload the module.
+	 * Just mark them as not being a leak then. The .init* ELF
+	 * sections *do* get freed after boot so we *could* treat them
+	 * slightly differently with kmemleak_ignore() and only grey
+	 * them out as they work as typical memory allocations which
+	 * *do* eventually get freed, but let's just keep things simple
+	 * and avoid *any* false positives.
+	 */
+	kmemleak_not_leak(ptr);
+
+	memset(ptr, 0, size);
+	mod->mem[type].base = ptr;
+
+	return 0;
 }
 
-static void module_memory_free(void *ptr, enum mod_mem_type type)
+static void module_memory_free(struct module *mod, enum mod_mem_type type,
+			       bool unload_codetags)
 {
-	if (mod_mem_use_vmalloc(type))
-		vfree(ptr);
-	else
-		module_memfree(ptr);
+	void *ptr = mod->mem[type].base;
+
+	if (!unload_codetags && mod_mem_type_is_core_data(type))
+		return;
+
+	execmem_free(ptr);
 }
 
-static void free_mod_mem(struct module *mod)
+static void free_mod_mem(struct module *mod, bool unload_codetags)
 {
 	for_each_mod_mem_type(type) {
 		struct module_memory *mod_mem = &mod->mem[type];
@@ -1229,19 +1247,26 @@ static void free_mod_mem(struct module *mod)
 		/* Free lock-classes; relies on the preceding sync_rcu(). */
 		lockdep_free_key_range(mod_mem->base, mod_mem->size);
 		if (mod_mem->size)
-			module_memory_free(mod_mem->base, type);
+			module_memory_free(mod, type, unload_codetags);
 	}
 
 	/* MOD_DATA hosts mod, so free it at last */
 	lockdep_free_key_range(mod->mem[MOD_DATA].base, mod->mem[MOD_DATA].size);
-	module_memory_free(mod->mem[MOD_DATA].base, MOD_DATA);
+	module_memory_free(mod, MOD_DATA, unload_codetags);
 }
 
 /* Free a module, remove from lists, etc. */
 static void free_module(struct module *mod)
 {
+	bool unload_codetags;
+
 	trace_module_free(mod);
 
+	unload_codetags = codetag_unload_module(mod);
+	if (!unload_codetags)
+		pr_warn("%s: memory allocation(s) from the module still alive, cannot unload cleanly\n",
+			mod->name);
+
 	mod_sysfs_teardown(mod);
 
 	/*
@@ -1283,7 +1308,7 @@ static void free_module(struct module *mod)
 	kfree(mod->args);
 	percpu_modfree(mod);
 
-	free_mod_mem(mod);
+	free_mod_mem(mod, unload_codetags);
 }
 
 void *__symbol_get(const char *symbol)
@@ -1610,13 +1635,6 @@ static void free_modinfo(struct module *mod)
 	}
 }
 
-void * __weak module_alloc(unsigned long size)
-{
-	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
-			GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
-			NUMA_NO_NODE, __builtin_return_address(0));
-}
-
 bool __weak module_init_section(const char *name)
 {
 	return strstarts(name, ".init");
@@ -2225,7 +2243,6 @@ static int find_module_sections(struct module *mod, struct load_info *info)
 static int move_module(struct module *mod, struct load_info *info)
 {
 	int i;
-	void *ptr;
 	enum mod_mem_type t = 0;
 	int ret = -ENOMEM;
 
@@ -2234,26 +2251,12 @@ static int move_module(struct module *mod, struct load_info *info)
 			mod->mem[type].base = NULL;
 			continue;
 		}
-		mod->mem[type].size = PAGE_ALIGN(mod->mem[type].size);
-		ptr = module_memory_alloc(mod->mem[type].size, type);
-		/*
-                 * The pointer to these blocks of memory are stored on the module
-                 * structure and we keep that around so long as the module is
-                 * around. We only free that memory when we unload the module.
-                 * Just mark them as not being a leak then. The .init* ELF
-                 * sections *do* get freed after boot so we *could* treat them
-                 * slightly differently with kmemleak_ignore() and only grey
-                 * them out as they work as typical memory allocations which
-                 * *do* eventually get freed, but let's just keep things simple
-                 * and avoid *any* false positives.
-		 */
-		kmemleak_not_leak(ptr);
-		if (!ptr) {
+
+		ret = module_memory_alloc(mod, type);
+		if (ret) {
 			t = type;
 			goto out_enomem;
 		}
-		memset(ptr, 0, mod->mem[type].size);
-		mod->mem[type].base = ptr;
 	}
 
 	/* Transfer each section which specifies SHF_ALLOC */
@@ -2296,7 +2299,7 @@ static int move_module(struct module *mod, struct load_info *info)
 	return 0;
 out_enomem:
 	for (t--; t >= 0; t--)
-		module_memory_free(mod->mem[t].base, t);
+		module_memory_free(mod, t, true);
 	return ret;
 }
 
@@ -2426,7 +2429,7 @@ static void module_deallocate(struct module *mod, struct load_info *info)
 	percpu_modfree(mod);
 	module_arch_freeing_init(mod);
 
-	free_mod_mem(mod);
+	free_mod_mem(mod, true);
 }
 
 int __weak module_finalize(const Elf_Ehdr *hdr,
@@ -2482,9 +2485,9 @@ static void do_free_init(struct work_struct *w)
 
 	llist_for_each_safe(pos, n, list) {
 		initfree = container_of(pos, struct mod_initfree, node);
-		module_memfree(initfree->init_text);
-		module_memfree(initfree->init_data);
-		module_memfree(initfree->init_rodata);
+		execmem_free(initfree->init_text);
+		execmem_free(initfree->init_data);
+		execmem_free(initfree->init_rodata);
 		kfree(initfree);
 	}
 }
@@ -2594,10 +2597,10 @@ static noinline int do_init_module(struct module *mod)
 	 * We want to free module_init, but be aware that kallsyms may be
 	 * walking this with preempt disabled.  In all the failure paths, we
 	 * call synchronize_rcu(), but we don't want to slow down the success
-	 * path. module_memfree() cannot be called in an interrupt, so do the
+	 * path. execmem_free() cannot be called in an interrupt, so do the
	 * work and call synchronize_rcu() in a work queue.
 	 *
-	 * Note that module_alloc() on most architectures creates W+X page
+	 * Note that execmem_alloc() on most architectures creates W+X page
 	 * mappings which won't be cleaned up until do_free_init() runs.  Any
 	 * code such as mark_rodata_ro() which depends on those mappings to
 	 * be cleaned up needs to sync with the queued work by invoking
@@ -2995,6 +2998,8 @@ static int load_module(struct load_info *info, const char __user *uargs,
 	/* Get rid of temporary copy. */
 	free_copy(info, flags);
 
+	codetag_load_module(mod);
+
 	/* Done! */
 	trace_module_load(mod);
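
The sketch below is not part of the patch; it only condenses the allocation pattern the diff moves the module loader to: pick EXECMEM_MODULE_DATA or EXECMEM_MODULE_TEXT depending on the kind of region, allocate a page-aligned block with execmem_alloc(), and release it with execmem_free(). The helpers alloc_mod_region()/free_mod_region() are hypothetical names used for illustration; only execmem_alloc(), execmem_free() and the two execmem types are taken from the patch above.

#include <linux/execmem.h>
#include <linux/mm.h>		/* PAGE_ALIGN() */
#include <linux/string.h>	/* memset() */

/*
 * Hypothetical helper mirroring module_memory_alloc() in the patch:
 * map the region kind to an execmem type, allocate a page-aligned
 * block and zero it.  Returns NULL on allocation failure.
 */
static void *alloc_mod_region(size_t size, bool is_data)
{
	enum execmem_type type = is_data ? EXECMEM_MODULE_DATA
					 : EXECMEM_MODULE_TEXT;
	void *ptr = execmem_alloc(type, PAGE_ALIGN(size));

	if (ptr)
		memset(ptr, 0, PAGE_ALIGN(size));

	return ptr;
}

/*
 * Hypothetical counterpart to module_memory_free(): like vfree(),
 * execmem_free() must not be called from interrupt context, which is
 * why do_free_init() in the patch defers the freeing to a workqueue.
 */
static void free_mod_region(void *ptr)
{
	execmem_free(ptr);
}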