Diffstat (limited to 'fs/proc/kcore.c')
 fs/proc/kcore.c | 85
 1 file changed, 40 insertions(+), 45 deletions(-)
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 71157ee35c1a..25b44b303b35 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -24,7 +24,7 @@
 #include <linux/memblock.h>
 #include <linux/init.h>
 #include <linux/slab.h>
-#include <linux/uaccess.h>
+#include <linux/uio.h>
 #include <asm/io.h>
 #include <linux/list.h>
 #include <linux/ioport.h>
@@ -307,10 +307,9 @@ static void append_kcore_note(char *notes, size_t *i, const char *name,
 	*i = ALIGN(*i + descsz, 4);
 }
 
-static ssize_t
-read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
+static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
 {
-	char *buf = file->private_data;
+	loff_t *fpos = &iocb->ki_pos;
 	size_t phdrs_offset, notes_offset, data_offset;
 	size_t page_offline_frozen = 1;
 	size_t phdrs_len, notes_len;
@@ -318,6 +317,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 	size_t tsz;
 	int nphdr;
 	unsigned long start;
+	size_t buflen = iov_iter_count(iter);
 	size_t orig_buflen = buflen;
 	int ret = 0;
 
@@ -356,12 +356,11 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 		};
 
 		tsz = min_t(size_t, buflen, sizeof(struct elfhdr) - *fpos);
-		if (copy_to_user(buffer, (char *)&ehdr + *fpos, tsz)) {
+		if (copy_to_iter((char *)&ehdr + *fpos, tsz, iter) != tsz) {
 			ret = -EFAULT;
 			goto out;
 		}
 
-		buffer += tsz;
 		buflen -= tsz;
 		*fpos += tsz;
 	}
@@ -398,15 +397,14 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 		}
 
 		tsz = min_t(size_t, buflen, phdrs_offset + phdrs_len - *fpos);
-		if (copy_to_user(buffer, (char *)phdrs + *fpos - phdrs_offset,
-				 tsz)) {
+		if (copy_to_iter((char *)phdrs + *fpos - phdrs_offset, tsz,
+				 iter) != tsz) {
 			kfree(phdrs);
 			ret = -EFAULT;
 			goto out;
 		}
 		kfree(phdrs);
 
-		buffer += tsz;
 		buflen -= tsz;
 		*fpos += tsz;
 	}
@@ -448,14 +446,13 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 				  min(vmcoreinfo_size, notes_len - i));
 
 		tsz = min_t(size_t, buflen, notes_offset + notes_len - *fpos);
-		if (copy_to_user(buffer, notes + *fpos - notes_offset, tsz)) {
+		if (copy_to_iter(notes + *fpos - notes_offset, tsz, iter) != tsz) {
 			kfree(notes);
 			ret = -EFAULT;
 			goto out;
 		}
 		kfree(notes);
 
-		buffer += tsz;
 		buflen -= tsz;
 		*fpos += tsz;
 	}
@@ -497,7 +494,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 		}
 
 		if (!m) {
-			if (clear_user(buffer, tsz)) {
+			if (iov_iter_zero(tsz, iter) != tsz) {
 				ret = -EFAULT;
 				goto out;
 			}
@@ -506,16 +503,33 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 
 		switch (m->type) {
 		case KCORE_VMALLOC:
-			vread(buf, (char *)start, tsz);
-			/* we have to zero-fill user buffer even if no read */
-			if (copy_to_user(buffer, buf, tsz)) {
-				ret = -EFAULT;
-				goto out;
+		{
+			const char *src = (char *)start;
+			size_t read = 0, left = tsz;
+
+			/*
+			 * vmalloc uses spinlocks, so we optimistically try to
+			 * read memory. If this fails, fault pages in and try
+			 * again until we are done.
+			 */
+			while (true) {
+				read += vread_iter(iter, src, left);
+				if (read == tsz)
+					break;
+
+				src += read;
+				left -= read;
+
+				if (fault_in_iov_iter_writeable(iter, left)) {
+					ret = -EFAULT;
+					goto out;
+				}
 			}
 			break;
+		}
 		case KCORE_USER:
 			/* User page is handled prior to normal kernel page: */
-			if (copy_to_user(buffer, (char *)start, tsz)) {
+			if (copy_to_iter((char *)start, tsz, iter) != tsz) {
 				ret = -EFAULT;
 				goto out;
 			}
@@ -531,7 +545,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 			 */
 			if (!page || PageOffline(page) ||
 			    is_page_hwpoison(page) || !pfn_is_ram(pfn)) {
-				if (clear_user(buffer, tsz)) {
+				if (iov_iter_zero(tsz, iter) != tsz) {
 					ret = -EFAULT;
 					goto out;
 				}
@@ -541,24 +555,17 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 		case KCORE_VMEMMAP:
 		case KCORE_TEXT:
 			/*
-			 * Using bounce buffer to bypass the
-			 * hardened user copy kernel text checks.
+			 * We use _copy_to_iter() to bypass usermode hardening
+			 * which would otherwise prevent this operation.
 			 */
-			if (copy_from_kernel_nofault(buf, (void *)start, tsz)) {
-				if (clear_user(buffer, tsz)) {
-					ret = -EFAULT;
-					goto out;
-				}
-			} else {
-				if (copy_to_user(buffer, buf, tsz)) {
-					ret = -EFAULT;
-					goto out;
-				}
+			if (_copy_to_iter((char *)start, tsz, iter) != tsz) {
+				ret = -EFAULT;
+				goto out;
 			}
 			break;
 		default:
 			pr_warn_once("Unhandled KCORE type: %d\n", m->type);
-			if (clear_user(buffer, tsz)) {
+			if (iov_iter_zero(tsz, iter) != tsz) {
 				ret = -EFAULT;
 				goto out;
 			}
@@ -566,7 +573,6 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 skip:
 		buflen -= tsz;
 		*fpos += tsz;
-		buffer += tsz;
 		start += tsz;
 		tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
 	}
@@ -589,10 +595,6 @@ static int open_kcore(struct inode *inode, struct file *filp)
 	if (ret)
 		return ret;
 
-	filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
-	if (!filp->private_data)
-		return -ENOMEM;
-
 	if (kcore_need_update)
 		kcore_update_ram();
 	if (i_size_read(inode) != proc_root_kcore->size) {
@@ -603,16 +605,9 @@ static int open_kcore(struct inode *inode, struct file *filp)
 	return 0;
 }
 
-static int release_kcore(struct inode *inode, struct file *file)
-{
-	kfree(file->private_data);
-	return 0;
-}
-
 static const struct proc_ops kcore_proc_ops = {
-	.proc_read	= read_kcore,
+	.proc_read_iter	= read_kcore_iter,
 	.proc_open	= open_kcore,
-	.proc_release	= release_kcore,
 	.proc_lseek	= default_llseek,
 };
 
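For context, nothing changes for userspace: /proc/kcore still reads as an ELF core image, only the in-kernel copy path moves from a bounce buffer plus copy_to_user() to iov_iter primitives, which also lets open_kcore() drop the per-open PAGE_SIZE allocation and the release hook. A minimal sketch of a test program exercising the converted path by fetching and validating the ELF header (hypothetical example, not part of the patch; assumes a 64-bit kernel, root privileges, and CONFIG_PROC_KCORE=y):

/*
 * Hypothetical test program (not part of this patch): read the ELF
 * header from /proc/kcore via a single pread(), which after this
 * change lands in read_kcore_iter() through .proc_read_iter.
 */
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	Elf64_Ehdr ehdr;
	int fd = open("/proc/kcore", O_RDONLY);

	if (fd < 0) {
		perror("open /proc/kcore");
		return 1;
	}
	if (pread(fd, &ehdr, sizeof(ehdr), 0) != (ssize_t)sizeof(ehdr)) {
		perror("pread");
		close(fd);
		return 1;
	}
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) == 0)
		printf("ELF core image, %u program headers\n", ehdr.e_phnum);
	close(fd);
	return 0;
}

A read at offset 0 like this is served entirely by the copy_to_iter() branch for the ELF header; the KCORE_* switch cases are only reached once the offset passes data_offset.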
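Because the destination is now an iov_iter rather than a single user pointer, a vectored read is also handled naturally: copy_to_iter() and vread_iter() advance through the iovec segments as they fill. A second hypothetical sketch, scattering the same 64-byte header across two buffers with preadv():

/*
 * Hypothetical illustration (not part of this patch): the header
 * gathered into two separate buffers; the kernel side walks both
 * iovec segments via the iov_iter.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	unsigned char ident[16], rest[48];	/* 16 + 48 = sizeof(Elf64_Ehdr) */
	struct iovec iov[2] = {
		{ .iov_base = ident, .iov_len = sizeof(ident) },
		{ .iov_base = rest,  .iov_len = sizeof(rest)  },
	};
	int fd = open("/proc/kcore", O_RDONLY);

	if (fd < 0) {
		perror("open /proc/kcore");
		return 1;
	}
	if (preadv(fd, iov, 2, 0) < 0) {
		perror("preadv");
		close(fd);
		return 1;
	}
	/* expect: 7f E L F */
	printf("magic: %02x %c%c%c\n", ident[0], ident[1], ident[2], ident[3]);
	close(fd);
	return 0;
}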