diff options
Diffstat (limited to 'fs/binfmt_elf.c')
-rw-r--r--  fs/binfmt_elf.c | 101
1 file changed, 67 insertions(+), 34 deletions(-)
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index a43897b03ce9..5ae8045f4df4 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1003,7 +1003,8 @@ out_free_interp:  	if (elf_read_implies_exec(*elf_ex, executable_stack))  		current->personality |= READ_IMPLIES_EXEC; -	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) +	const int snapshot_randomize_va_space = READ_ONCE(randomize_va_space); +	if (!(current->personality & ADDR_NO_RANDOMIZE) && snapshot_randomize_va_space)  		current->flags |= PF_RANDOMIZE;  	setup_new_exec(bprm); @@ -1061,10 +1062,40 @@ out_free_interp:  			 * Header for ET_DYN binaries to calculate the  			 * randomization (load_bias) for all the LOAD  			 * Program Headers. +			 */ + +			/* +			 * Calculate the entire size of the ELF mapping +			 * (total_size), used for the initial mapping, +			 * due to load_addr_set which is set to true later +			 * once the initial mapping is performed. +			 * +			 * Note that this is only sensible when the LOAD +			 * segments are contiguous (or overlapping). If +			 * used for LOADs that are far apart, this would +			 * cause the holes between LOADs to be mapped, +			 * running the risk of having the mapping fail, +			 * as it would be larger than the ELF file itself.  			 * +			 * As a result, only ET_DYN does this, since +			 * some ET_EXEC (e.g. ia64) may have large virtual +			 * memory holes between LOADs. +			 * +			 */ +			total_size = total_mapping_size(elf_phdata, +							elf_ex->e_phnum); +			if (!total_size) { +				retval = -EINVAL; +				goto out_free_dentry; +			} + +			/* Calculate any requested alignment. */ +			alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum); + +			/*  			 * There are effectively two types of ET_DYN -			 * binaries: programs (i.e. PIE: ET_DYN with INTERP) -			 * and loaders (ET_DYN without INTERP, since they +			 * binaries: programs (i.e. 
PIE: ET_DYN with PT_INTERP) +			 * and loaders (ET_DYN without PT_INTERP, since they  			 * _are_ the ELF interpreter). The loaders must  			 * be loaded away from programs since the program  			 * may otherwise collide with the loader (especially @@ -1084,15 +1115,44 @@ out_free_interp:  			 * without MAP_FIXED nor MAP_FIXED_NOREPLACE).  			 */  			if (interpreter) { +				/* On ET_DYN with PT_INTERP, we do the ASLR. */  				load_bias = ELF_ET_DYN_BASE;  				if (current->flags & PF_RANDOMIZE)  					load_bias += arch_mmap_rnd(); -				alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum); +				/* Adjust alignment as requested. */  				if (alignment)  					load_bias &= ~(alignment - 1);  				elf_flags |= MAP_FIXED_NOREPLACE; -			} else -				load_bias = 0; +			} else { +				/* +				 * For ET_DYN without PT_INTERP, we rely on +				 * the architectures's (potentially ASLR) mmap +				 * base address (via a load_bias of 0). +				 * +				 * When a large alignment is requested, we +				 * must do the allocation at address "0" right +				 * now to discover where things will load so +				 * that we can adjust the resulting alignment. +				 * In this case (load_bias != 0), we can use +				 * MAP_FIXED_NOREPLACE to make sure the mapping +				 * doesn't collide with anything. +				 */ +				if (alignment > ELF_MIN_ALIGN) { +					load_bias = elf_load(bprm->file, 0, elf_ppnt, +							     elf_prot, elf_flags, total_size); +					if (BAD_ADDR(load_bias)) { +						retval = IS_ERR_VALUE(load_bias) ? +							 PTR_ERR((void*)load_bias) : -EINVAL; +						goto out_free_dentry; +					} +					vm_munmap(load_bias, total_size); +					/* Adjust alignment as requested. */ +					if (alignment) +						load_bias &= ~(alignment - 1); +					elf_flags |= MAP_FIXED_NOREPLACE; +				} else +					load_bias = 0; +			}  			/*  			 * Since load_bias is used for all subsequent loading @@ -1102,31 +1162,6 @@ out_free_interp:  			 * is then page aligned.  			 
*/  			load_bias = ELF_PAGESTART(load_bias - vaddr); - -			/* -			 * Calculate the entire size of the ELF mapping -			 * (total_size), used for the initial mapping, -			 * due to load_addr_set which is set to true later -			 * once the initial mapping is performed. -			 * -			 * Note that this is only sensible when the LOAD -			 * segments are contiguous (or overlapping). If -			 * used for LOADs that are far apart, this would -			 * cause the holes between LOADs to be mapped, -			 * running the risk of having the mapping fail, -			 * as it would be larger than the ELF file itself. -			 * -			 * As a result, only ET_DYN does this, since -			 * some ET_EXEC (e.g. ia64) may have large virtual -			 * memory holes between LOADs. -			 * -			 */ -			total_size = total_mapping_size(elf_phdata, -							elf_ex->e_phnum); -			if (!total_size) { -				retval = -EINVAL; -				goto out_free_dentry; -			}  		}  		error = elf_load(bprm->file, load_bias + vaddr, elf_ppnt, @@ -1216,7 +1251,6 @@ out_free_interp:  		}  		reloc_func_desc = interp_load_addr; -		allow_write_access(interpreter);  		fput(interpreter);  		kfree(interp_elf_ex); @@ -1251,7 +1285,7 @@ out_free_interp:  	mm->end_data = end_data;  	mm->start_stack = bprm->p; -	if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) { +	if ((current->flags & PF_RANDOMIZE) && (snapshot_randomize_va_space > 1)) {  		/*  		 * For architectures with ELF randomization, when executing  		 * a loader directly (i.e. no interpreter listed in ELF @@ -1308,7 +1342,6 @@ out_free_dentry:  	kfree(interp_elf_ex);  	kfree(interp_elf_phdata);  out_free_file: -	allow_write_access(interpreter);  	if (interpreter)  		fput(interpreter);  out_free_ph:  |