Diffstat (limited to 'mm/mmap.c')
-rw-r--r--  mm/mmap.c | 51 +++++++++++++++++++++++++++++++++++++--------------
1 file changed, 37 insertions(+), 14 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 129b847d30cc..c0a3637cdb64 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -31,6 +31,7 @@
 #include <linux/mempolicy.h>
 #include <linux/rmap.h>
 #include <linux/mmu_notifier.h>
+#include <linux/mmdebug.h>
 #include <linux/perf_event.h>
 #include <linux/audit.h>
 #include <linux/khugepaged.h>
@@ -134,6 +135,10 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 {
 	unsigned long free, allowed, reserve;
 
+	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
+			-(s64)vm_committed_as_batch * num_online_cpus(),
+			"memory commitment underflow");
+
 	vm_acct_memory(pages);
 
 	/*
@@ -216,7 +221,7 @@ static void __remove_shared_vm_struct(struct vm_area_struct *vma,
 	if (vma->vm_flags & VM_DENYWRITE)
 		atomic_inc(&file_inode(file)->i_writecount);
 	if (vma->vm_flags & VM_SHARED)
-		mapping->i_mmap_writable--;
+		mapping_unmap_writable(mapping);
 
 	flush_dcache_mmap_lock(mapping);
 	if (unlikely(vma->vm_flags & VM_NONLINEAR))
@@ -364,20 +369,20 @@ static int browse_rb(struct rb_root *root)
 		struct vm_area_struct *vma;
 		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
 		if (vma->vm_start < prev) {
-			pr_info("vm_start %lx prev %lx\n", vma->vm_start, prev);
+			pr_emerg("vm_start %lx prev %lx\n", vma->vm_start, prev);
 			bug = 1;
 		}
 		if (vma->vm_start < pend) {
-			pr_info("vm_start %lx pend %lx\n", vma->vm_start, pend);
+			pr_emerg("vm_start %lx pend %lx\n", vma->vm_start, pend);
 			bug = 1;
 		}
 		if (vma->vm_start > vma->vm_end) {
-			pr_info("vm_end %lx < vm_start %lx\n",
+			pr_emerg("vm_end %lx < vm_start %lx\n",
 				vma->vm_end, vma->vm_start);
 			bug = 1;
 		}
 		if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
-			pr_info("free gap %lx, correct %lx\n",
+			pr_emerg("free gap %lx, correct %lx\n",
 			       vma->rb_subtree_gap,
 			       vma_compute_subtree_gap(vma));
 			bug = 1;
@@ -391,7 +396,7 @@ static int browse_rb(struct rb_root *root)
 	for (nd = pn; nd; nd = rb_prev(nd))
 		j++;
 	if (i != j) {
-		pr_info("backwards %d, forwards %d\n", j, i);
+		pr_emerg("backwards %d, forwards %d\n", j, i);
 		bug = 1;
 	}
 	return bug ? -1 : i;
@@ -426,17 +431,17 @@ static void validate_mm(struct mm_struct *mm)
 		i++;
 	}
 	if (i != mm->map_count) {
-		pr_info("map_count %d vm_next %d\n", mm->map_count, i);
+		pr_emerg("map_count %d vm_next %d\n", mm->map_count, i);
 		bug = 1;
 	}
 	if (highest_address != mm->highest_vm_end) {
-		pr_info("mm->highest_vm_end %lx, found %lx\n",
+		pr_emerg("mm->highest_vm_end %lx, found %lx\n",
 		       mm->highest_vm_end, highest_address);
 		bug = 1;
 	}
 	i = browse_rb(&mm->mm_rb);
 	if (i != mm->map_count) {
-		pr_info("map_count %d rb %d\n", mm->map_count, i);
+		pr_emerg("map_count %d rb %d\n", mm->map_count, i);
 		bug = 1;
 	}
 	BUG_ON(bug);
@@ -617,7 +622,7 @@ static void __vma_link_file(struct vm_area_struct *vma)
 		if (vma->vm_flags & VM_DENYWRITE)
 			atomic_dec(&file_inode(file)->i_writecount);
 		if (vma->vm_flags & VM_SHARED)
-			mapping->i_mmap_writable++;
+			atomic_inc(&mapping->i_mmap_writable);
 
 		flush_dcache_mmap_lock(mapping);
 		if (unlikely(vma->vm_flags & VM_NONLINEAR))
@@ -1572,6 +1577,17 @@ munmap_back:
 			if (error)
 				goto free_vma;
 		}
+		if (vm_flags & VM_SHARED) {
+			error = mapping_map_writable(file->f_mapping);
+			if (error)
+				goto allow_write_and_free_vma;
+		}
+
+		/* ->mmap() can change vma->vm_file, but must guarantee that
+		 * vma_link() below can deny write-access if VM_DENYWRITE is set
+		 * and map writably if VM_SHARED is set. This usually means the
+		 * new file must not have been exposed to user-space, yet.
+		 */
 		vma->vm_file = get_file(file);
 		error = file->f_op->mmap(file, vma);
 		if (error)
@@ -1611,8 +1627,12 @@ munmap_back:
 	vma_link(mm, vma, prev, rb_link, rb_parent);
 
 	/* Once vma denies write, undo our temporary denial count */
-	if (vm_flags & VM_DENYWRITE)
-		allow_write_access(file);
+	if (file) {
+		if (vm_flags & VM_SHARED)
+			mapping_unmap_writable(file->f_mapping);
+		if (vm_flags & VM_DENYWRITE)
+			allow_write_access(file);
+	}
 	file = vma->vm_file;
 out:
 	perf_event_mmap(vma);
@@ -1641,14 +1661,17 @@ out:
 	return addr;
 
 unmap_and_free_vma:
-	if (vm_flags & VM_DENYWRITE)
-		allow_write_access(file);
 	vma->vm_file = NULL;
 	fput(file);
 
 	/* Undo any partial mapping done by a device driver. */
 	unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
 	charged = 0;
+	if (vm_flags & VM_SHARED)
+		mapping_unmap_writable(file->f_mapping);
+allow_write_and_free_vma:
+	if (vm_flags & VM_DENYWRITE)
+		allow_write_access(file);
 free_vma:
 	kmem_cache_free(vm_area_cachep, vma);
 unacct_error:
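
The diff calls mapping_map_writable() and mapping_unmap_writable() without showing their definitions; since the diffstat is limited to mm/mmap.c, they are introduced elsewhere in the same change (their natural home is include/linux/fs.h). A minimal sketch of the semantics this diff relies on, assuming i_mmap_writable has become an atomic_t (the atomic_inc() in __vma_link_file() points that way); treat the exact bodies and the -EPERM error code as an approximation, not the authoritative definition:

static inline int mapping_map_writable(struct address_space *mapping)
{
	/* Take a writable-mapping reference; fail if something has
	 * driven the counter negative to forbid new writable maps.
	 * Returns 0 on success, a negative errno on failure. */
	return atomic_inc_unless_negative(&mapping->i_mmap_writable) ?
		0 : -EPERM;
}

static inline void mapping_unmap_writable(struct address_space *mapping)
{
	/* Drop a writable-mapping reference taken above. */
	atomic_dec(&mapping->i_mmap_writable);
}

Because mapping_map_writable() can now fail, the mmap_region() error path gains a second label: unmap_and_free_vma drops the writable count first and then falls into allow_write_and_free_vma, which undoes the earlier deny_write_access(); the two references are released in reverse order of acquisition.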
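The new VM_WARN_ONCE() in __vm_enough_memory() deliberately does not compare against zero: percpu_counter_read() returns only the shared count and ignores the per-CPU deltas, each of which may hold up to the batch size before being folded in, so a healthy counter can legitimately read as low as -(vm_committed_as_batch * num_online_cpus()). A standalone illustration of that bound with made-up numbers (the batch size, CPU count, and reading below are all hypothetical):

#include <stdio.h>

/*
 * A percpu_counter folds a CPU-local delta into the shared count only
 * once the delta exceeds +/-batch; a plain read skips whatever is still
 * unfolded. A counter whose true value is non-negative can therefore
 * read as low as -(batch * nr_cpus). Only readings below that bound
 * indicate a real commitment underflow, which is what the warning tests.
 */
int main(void)
{
	long batch = 32;	/* hypothetical vm_committed_as_batch */
	long cpus = 8;		/* hypothetical num_online_cpus() */
	long reading = -300;	/* hypothetical percpu_counter_read() */
	long slop = batch * cpus;	/* worst-case unfolded deltas: 256 */

	if (reading < -slop)
		printf("commitment underflow: %ld < %ld\n", reading, -slop);
	else
		printf("%ld is within the +/-%ld slop; not a bug\n",
		       reading, slop);
	return 0;
}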