diff options
Diffstat (limited to 'mm/mmap.c')
| -rw-r--r-- | mm/mmap.c | 64 | 
1 file changed, 23 insertions, 41 deletions
diff --git a/mm/mmap.c b/mm/mmap.c index d1eb87ef4b1a..ff1944d8d458 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -182,12 +182,12 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)  	if (vma->vm_file)  		fput(vma->vm_file);  	mpol_put(vma_policy(vma)); -	kmem_cache_free(vm_area_cachep, vma); +	vm_area_free(vma);  	return next;  } -static int do_brk(unsigned long addr, unsigned long len, struct list_head *uf); - +static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags, +		struct list_head *uf);  SYSCALL_DEFINE1(brk, unsigned long, brk)  {  	unsigned long retval; @@ -245,7 +245,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)  		goto out;  	/* Ok, looks good - let it rip. */ -	if (do_brk(oldbrk, newbrk-oldbrk, &uf) < 0) +	if (do_brk_flags(oldbrk, newbrk-oldbrk, 0, &uf) < 0)  		goto out;  set_brk: @@ -911,7 +911,7 @@ again:  			anon_vma_merge(vma, next);  		mm->map_count--;  		mpol_put(vma_policy(next)); -		kmem_cache_free(vm_area_cachep, next); +		vm_area_free(next);  		/*  		 * In mprotect's case 6 (see comments on vma_merge),  		 * we must remove another next too. It would clutter @@ -1729,19 +1729,17 @@ unsigned long mmap_region(struct file *file, unsigned long addr,  	 * specific mapper. the address has already been validated, but  	 * not unmapped, but the maps are removed from the list.  	 
*/ -	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); +	vma = vm_area_alloc(mm);  	if (!vma) {  		error = -ENOMEM;  		goto unacct_error;  	} -	vma->vm_mm = mm;  	vma->vm_start = addr;  	vma->vm_end = addr + len;  	vma->vm_flags = vm_flags;  	vma->vm_page_prot = vm_get_page_prot(vm_flags);  	vma->vm_pgoff = pgoff; -	INIT_LIST_HEAD(&vma->anon_vma_chain);  	if (file) {  		if (vm_flags & VM_DENYWRITE) { @@ -1832,7 +1830,7 @@ allow_write_and_free_vma:  	if (vm_flags & VM_DENYWRITE)  		allow_write_access(file);  free_vma: -	kmem_cache_free(vm_area_cachep, vma); +	vm_area_free(vma);  unacct_error:  	if (charged)  		vm_unacct_memory(charged); @@ -2620,15 +2618,10 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,  			return err;  	} -	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); +	new = vm_area_dup(vma);  	if (!new)  		return -ENOMEM; -	/* most fields are the same, copy all, and then fixup */ -	*new = *vma; - -	INIT_LIST_HEAD(&new->anon_vma_chain); -  	if (new_below)  		new->vm_end = addr;  	else { @@ -2669,7 +2662,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,   out_free_mpol:  	mpol_put(vma_policy(new));   out_free_vma: -	kmem_cache_free(vm_area_cachep, new); +	vm_area_free(new);  	return err;  } @@ -2929,21 +2922,14 @@ static inline void verify_mm_writelocked(struct mm_struct *mm)   *  anonymous maps.  eventually we may be able to do some   *  brk-specific accounting here.   
*/ -static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags, struct list_head *uf) +static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long flags, struct list_head *uf)  {  	struct mm_struct *mm = current->mm;  	struct vm_area_struct *vma, *prev; -	unsigned long len;  	struct rb_node **rb_link, *rb_parent;  	pgoff_t pgoff = addr >> PAGE_SHIFT;  	int error; -	len = PAGE_ALIGN(request); -	if (len < request) -		return -ENOMEM; -	if (!len) -		return 0; -  	/* Until we need other flags, refuse anything except VM_EXEC. */  	if ((flags & (~VM_EXEC)) != 0)  		return -EINVAL; @@ -2991,14 +2977,12 @@ static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long  	/*  	 * create a vma struct for an anonymous mapping  	 */ -	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); +	vma = vm_area_alloc(mm);  	if (!vma) {  		vm_unacct_memory(len >> PAGE_SHIFT);  		return -ENOMEM;  	} -	INIT_LIST_HEAD(&vma->anon_vma_chain); -	vma->vm_mm = mm;  	vma->vm_start = addr;  	vma->vm_end = addr + len;  	vma->vm_pgoff = pgoff; @@ -3015,18 +2999,20 @@ out:  	return 0;  } -static int do_brk(unsigned long addr, unsigned long len, struct list_head *uf) -{ -	return do_brk_flags(addr, len, 0, uf); -} - -int vm_brk_flags(unsigned long addr, unsigned long len, unsigned long flags) +int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)  {  	struct mm_struct *mm = current->mm; +	unsigned long len;  	int ret;  	bool populate;  	LIST_HEAD(uf); +	len = PAGE_ALIGN(request); +	if (len < request) +		return -ENOMEM; +	if (!len) +		return 0; +  	if (down_write_killable(&mm->mmap_sem))  		return -EINTR; @@ -3207,16 +3193,14 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,  		}  		*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);  	} else { -		new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); +		new_vma = vm_area_dup(vma);  		if (!new_vma)  			goto out; -		*new_vma = *vma;  		
new_vma->vm_start = addr;  		new_vma->vm_end = addr + len;  		new_vma->vm_pgoff = pgoff;  		if (vma_dup_policy(vma, new_vma))  			goto out_free_vma; -		INIT_LIST_HEAD(&new_vma->anon_vma_chain);  		if (anon_vma_clone(new_vma, vma))  			goto out_free_mempol;  		if (new_vma->vm_file) @@ -3231,7 +3215,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,  out_free_mempol:  	mpol_put(vma_policy(new_vma));  out_free_vma: -	kmem_cache_free(vm_area_cachep, new_vma); +	vm_area_free(new_vma);  out:  	return NULL;  } @@ -3355,12 +3339,10 @@ static struct vm_area_struct *__install_special_mapping(  	int ret;  	struct vm_area_struct *vma; -	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); +	vma = vm_area_alloc(mm);  	if (unlikely(vma == NULL))  		return ERR_PTR(-ENOMEM); -	INIT_LIST_HEAD(&vma->anon_vma_chain); -	vma->vm_mm = mm;  	vma->vm_start = addr;  	vma->vm_end = addr + len; @@ -3381,7 +3363,7 @@ static struct vm_area_struct *__install_special_mapping(  	return vma;  out: -	kmem_cache_free(vm_area_cachep, vma); +	vm_area_free(vma);  	return ERR_PTR(ret);  }  |