Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--	mm/vmalloc.c	62
1 file changed, 19 insertions(+), 43 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ebff729cc956..cfea25be7754 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -603,26 +603,6 @@ static void unmap_vmap_area(struct vmap_area *va)
 	vunmap_page_range(va->va_start, va->va_end);
 }
 
-static void vmap_debug_free_range(unsigned long start, unsigned long end)
-{
-	/*
-	 * Unmap page tables and force a TLB flush immediately if pagealloc
-	 * debugging is enabled.  This catches use after free bugs similarly to
-	 * those in linear kernel virtual address space after a page has been
-	 * freed.
-	 *
-	 * All the lazy freeing logic is still retained, in order to minimise
-	 * intrusiveness of this debugging feature.
-	 *
-	 * This is going to be *slow* (linear kernel virtual address debugging
-	 * doesn't do a broadcast TLB flush so it is a lot faster).
-	 */
-	if (debug_pagealloc_enabled()) {
-		vunmap_page_range(start, end);
-		flush_tlb_kernel_range(start, end);
-	}
-}
-
 /*
  * lazy_max_pages is the maximum amount of virtual address space we gather up
  * before attempting to purge with a TLB flush.
@@ -756,6 +736,9 @@ static void free_unmap_vmap_area(struct vmap_area *va)
 {
 	flush_cache_vunmap(va->va_start, va->va_end);
 	unmap_vmap_area(va);
+	if (debug_pagealloc_enabled())
+		flush_tlb_kernel_range(va->va_start, va->va_end);
+
 	free_vmap_area_noflush(va);
 }
 
@@ -1053,6 +1036,10 @@ static void vb_free(const void *addr, unsigned long size)
 
 	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
 
+	if (debug_pagealloc_enabled())
+		flush_tlb_kernel_range((unsigned long)addr,
+					(unsigned long)addr + size);
+
 	spin_lock(&vb->lock);
 
 	/* Expand dirty range */
@@ -1141,16 +1128,16 @@ void vm_unmap_ram(const void *mem, unsigned int count)
 	BUG_ON(addr > VMALLOC_END);
 	BUG_ON(!PAGE_ALIGNED(addr));
 
-	debug_check_no_locks_freed(mem, size);
-	vmap_debug_free_range(addr, addr+size);
-
 	if (likely(count <= VMAP_MAX_ALLOC)) {
+		debug_check_no_locks_freed(mem, size);
 		vb_free(mem, size);
 		return;
 	}
 
 	va = find_vmap_area(addr);
 	BUG_ON(!va);
+	debug_check_no_locks_freed((void *)va->va_start,
+				    (va->va_end - va->va_start));
 	free_unmap_vmap_area(va);
 }
 EXPORT_SYMBOL(vm_unmap_ram);
@@ -1499,7 +1486,6 @@ struct vm_struct *remove_vm_area(const void *addr)
 		va->flags |= VM_LAZY_FREE;
 		spin_unlock(&vmap_area_lock);
 
-		vmap_debug_free_range(va->va_start, va->va_end);
 		kasan_free_shadow(vm);
 		free_unmap_vmap_area(va);
 
@@ -1519,16 +1505,17 @@ static void __vunmap(const void *addr, int deallocate_pages)
 			addr))
 		return;
 
-	area = remove_vm_area(addr);
+	area = find_vmap_area((unsigned long)addr)->vm;
 	if (unlikely(!area)) {
 		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
 				addr);
 		return;
 	}
 
-	debug_check_no_locks_freed(addr, get_vm_area_size(area));
-	debug_check_no_obj_freed(addr, get_vm_area_size(area));
+	debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
+	debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
 
+	remove_vm_area(addr);
 	if (deallocate_pages) {
 		int i;
 
@@ -2751,25 +2738,14 @@ static const struct seq_operations vmalloc_op = {
 	.show = s_show,
 };
 
-static int vmalloc_open(struct inode *inode, struct file *file)
+static int __init proc_vmalloc_init(void)
 {
 	if (IS_ENABLED(CONFIG_NUMA))
-		return seq_open_private(file, &vmalloc_op,
-					nr_node_ids * sizeof(unsigned int));
+		proc_create_seq_private("vmallocinfo", 0400, NULL,
+				&vmalloc_op,
+				nr_node_ids * sizeof(unsigned int), NULL);
 	else
-		return seq_open(file, &vmalloc_op);
-}
-
-static const struct file_operations proc_vmalloc_operations = {
-	.open		= vmalloc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= seq_release_private,
-};
-
-static int __init proc_vmalloc_init(void)
-{
-	proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
+		proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
 	return 0;
 }
 module_init(proc_vmalloc_init);
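Note: taken together, the hunks above cover three logical changes: vmap_debug_free_range() is removed in favour of flushing the TLB directly on the vunmap paths when page-alloc debugging is enabled, the debug_check_no_locks_freed()/debug_check_no_obj_freed() calls are moved so they run against the vm area's recorded range before the mapping is torn down, and the /proc/vmallocinfo registration is converted to proc_create_seq{,_private}(). As a readability aid, the vunmap path reads roughly as follows once the hunks are applied; this is a sketch reconstructed from the new side of the diff above, with comments added, not a verbatim copy of the tree:

static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);

	/* Tear down the page tables covering the area. */
	unmap_vmap_area(va);

	/*
	 * With CONFIG_DEBUG_PAGEALLOC, flush the TLB immediately instead of
	 * waiting for the lazy purge, so a stale mapping cannot be used after
	 * the area has been freed (use-after-free detection).
	 */
	if (debug_pagealloc_enabled())
		flush_tlb_kernel_range(va->va_start, va->va_end);

	/* Hand the vmap_area to the lazy-free machinery; the TLB purge
	 * remains deferred in the non-debug case. */
	free_vmap_area_noflush(va);
}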
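The last hunk relies on the proc_create_seq() and proc_create_seq_private() helpers, which register a seq_file-backed /proc entry directly from a struct seq_operations; proc_create_seq_private() additionally allocates per-open private state of the requested size, so the old vmalloc_open() wrapper and the proc_vmalloc_operations file_operations glue are no longer needed. The resulting registration, reconstructed from the new side of the diff with a comment added:

static int __init proc_vmalloc_init(void)
{
	if (IS_ENABLED(CONFIG_NUMA))
		/* per-open buffer of nr_node_ids counters for per-node stats */
		proc_create_seq_private("vmallocinfo", 0400, NULL,
				&vmalloc_op,
				nr_node_ids * sizeof(unsigned int), NULL);
	else
		proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
	return 0;
}
module_init(proc_vmalloc_init);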