Diffstat (limited to 'arch/x86/mm/tlb.c')
-rw-r--r--	arch/x86/mm/tlb.c	109
1 file changed, 55 insertions(+), 54 deletions(-)
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index dd8dda167a24..ee61c36d64f8 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -49,6 +49,13 @@ void leave_mm(int cpu)
 	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
 		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
 		load_cr3(swapper_pg_dir);
+		/*
+		 * This gets called in the idle path where RCU
+		 * functions differently.  Tracing normally
+		 * uses RCU, so we have to call the tracepoint
+		 * specially here.
+		 */
+		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
 	}
 }
 EXPORT_SYMBOL_GPL(leave_mm);
@@ -102,20 +109,24 @@ static void flush_tlb_func(void *info)
 
 	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
 		return;
+	if (!f->flush_end)
+		f->flush_end = f->flush_start + PAGE_SIZE;
 
 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
-		if (f->flush_end == TLB_FLUSH_ALL)
+		if (f->flush_end == TLB_FLUSH_ALL) {
 			local_flush_tlb();
-		else if (!f->flush_end)
-			__flush_tlb_single(f->flush_start);
-		else {
+			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
+		} else {
 			unsigned long addr;
+			unsigned long nr_pages =
+				(f->flush_end - f->flush_start) / PAGE_SIZE;
 			addr = f->flush_start;
 			while (addr < f->flush_end) {
 				__flush_tlb_single(addr);
 				addr += PAGE_SIZE;
 			}
+			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages);
 		}
 	} else
 		leave_mm(smp_processor_id());
@@ -153,46 +164,45 @@ void flush_tlb_current_task(void)
 
 	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 	local_flush_tlb();
+	trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
 		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
 	preempt_enable();
 }
 
+/*
+ * See Documentation/x86/tlb.txt for details.  We choose 33
+ * because it is large enough to cover the vast majority (at
+ * least 95%) of allocations, and is small enough that we are
+ * confident it will not cause too much overhead.  Each single
+ * flush is about 100 ns, so this caps the maximum overhead at
+ * _about_ 3,000 ns.
+ *
+ * This is in units of pages.
+ */
+static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
+
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 				unsigned long end, unsigned long vmflag)
 {
 	unsigned long addr;
-	unsigned act_entries, tlb_entries = 0;
-	unsigned long nr_base_pages;
+	/* do a global flush by default */
+	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
 
 	preempt_disable();
 	if (current->active_mm != mm)
-		goto flush_all;
+		goto out;
 
 	if (!current->mm) {
 		leave_mm(smp_processor_id());
-		goto flush_all;
+		goto out;
 	}
 
-	if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1
-					|| vmflag & VM_HUGETLB) {
-		local_flush_tlb();
-		goto flush_all;
-	}
-
-	/* In modern CPU, last level tlb used for both data/ins */
-	if (vmflag & VM_EXEC)
-		tlb_entries = tlb_lli_4k[ENTRIES];
-	else
-		tlb_entries = tlb_lld_4k[ENTRIES];
+	if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
+		base_pages_to_flush = (end - start) >> PAGE_SHIFT;
 
-	/* Assume all of TLB entries was occupied by this task */
-	act_entries = tlb_entries >> tlb_flushall_shift;
-	act_entries = mm->total_vm > act_entries ? act_entries : mm->total_vm;
-	nr_base_pages = (end - start) >> PAGE_SHIFT;
-
-	/* tlb_flushall_shift is on balance point, details in commit log */
-	if (nr_base_pages > act_entries) {
+	if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
+		base_pages_to_flush = TLB_FLUSH_ALL;
 		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 		local_flush_tlb();
 	} else {
@@ -201,17 +211,15 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
 			__flush_tlb_single(addr);
 		}
-
-		if (cpumask_any_but(mm_cpumask(mm),
-				smp_processor_id()) < nr_cpu_ids)
-			flush_tlb_others(mm_cpumask(mm), mm, start, end);
-		preempt_enable();
-		return;
 	}
-
-flush_all:
+	trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
+out:
+	if (base_pages_to_flush == TLB_FLUSH_ALL) {
+		start = 0UL;
+		end = TLB_FLUSH_ALL;
+	}
 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
+		flush_tlb_others(mm_cpumask(mm), mm, start, end);
 	preempt_enable();
 }
 
@@ -260,32 +268,26 @@ static void do_kernel_range_flush(void *info)
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-	unsigned act_entries;
-	struct flush_tlb_info info;
-
-	/* In modern CPU, last level tlb used for both data/ins */
-	act_entries = tlb_lld_4k[ENTRIES];
 
 	/* Balance as user space task's flush, a bit conservative */
-	if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1 ||
-		(end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift)
-
+	if (end == TLB_FLUSH_ALL ||
+	    (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
 		on_each_cpu(do_flush_tlb_all, NULL, 1);
-	else {
+	} else {
+		struct flush_tlb_info info;
 		info.flush_start = start;
 		info.flush_end = end;
 		on_each_cpu(do_kernel_range_flush, &info, 1);
 	}
 }
 
-#ifdef CONFIG_DEBUG_TLBFLUSH
 static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
 			     size_t count, loff_t *ppos)
 {
 	char buf[32];
 	unsigned int len;
 
-	len = sprintf(buf, "%hd\n", tlb_flushall_shift);
+	len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling);
 	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
 
@@ -294,20 +296,20 @@ static ssize_t tlbflush_write_file(struct file *file,
 {
 	char buf[32];
 	ssize_t len;
-	s8 shift;
+	int ceiling;
 
 	len = min(count, sizeof(buf) - 1);
 	if (copy_from_user(buf, user_buf, len))
 		return -EFAULT;
 
 	buf[len] = '\0';
-	if (kstrtos8(buf, 0, &shift))
+	if (kstrtoint(buf, 0, &ceiling))
 		return -EINVAL;
 
-	if (shift < -1 || shift >= BITS_PER_LONG)
+	if (ceiling < 0)
 		return -EINVAL;
 
-	tlb_flushall_shift = shift;
+	tlb_single_page_flush_ceiling = ceiling;
 	return count;
 }
 
@@ -317,11 +319,10 @@ static const struct file_operations fops_tlbflush = {
 	.llseek = default_llseek,
 };
 
-static int __init create_tlb_flushall_shift(void)
+static int __init create_tlb_single_page_flush_ceiling(void)
 {
-	debugfs_create_file("tlb_flushall_shift", S_IRUSR | S_IWUSR,
+	debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
 			    arch_debugfs_dir, NULL, &fops_tlbflush);
 	return 0;
 }
-late_initcall(create_tlb_flushall_shift);
-#endif
+late_initcall(create_tlb_single_page_flush_ceiling);
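
The effect of the flush_tlb_mm_range() rewrite is easiest to see in isolation: a ranged flush of up to tlb_single_page_flush_ceiling (33) pages is done one page at a time, while anything larger, a hugetlb range, or an explicit TLB_FLUSH_ALL falls back to a full flush, capping the per-call overhead at roughly 33 * ~100 ns, about 3,000 ns as the comment says. Below is a minimal user-space model of that decision, illustrative only and not kernel code; PAGE_SHIFT and TLB_FLUSH_ALL mirror the kernel's x86-64 values with 4k pages.

#include <stdio.h>

#define PAGE_SHIFT	12
#define TLB_FLUSH_ALL	(~0UL)

static unsigned long tlb_single_page_flush_ceiling = 33;

/* returns TLB_FLUSH_ALL for a full flush, else the number of pages */
static unsigned long pages_to_flush(unsigned long start, unsigned long end,
				    int is_hugetlb)
{
	/* do a global flush by default, as the patch does */
	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;

	/* huge-page ranges and "flush everything" requests stay global */
	if (end != TLB_FLUSH_ALL && !is_hugetlb)
		base_pages_to_flush = (end - start) >> PAGE_SHIFT;

	/* past the ceiling, one full flush beats N single-page flushes */
	if (base_pages_to_flush > tlb_single_page_flush_ceiling)
		base_pages_to_flush = TLB_FLUSH_ALL;

	return base_pages_to_flush;
}

int main(void)
{
	/* 33 pages: flushed one at a time */
	printf("%lu\n", pages_to_flush(0, 33UL << PAGE_SHIFT, 0));
	/* 34 pages: tips over into a full flush */
	printf("%#lx\n", pages_to_flush(0, 34UL << PAGE_SHIFT, 0));
	return 0;
}

Running it prints 33 for the first range and 0xffffffffffffffff (TLB_FLUSH_ALL) for the second, which is exactly the boundary the tunable controls.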
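With the CONFIG_DEBUG_TLBFLUSH guard removed, the debugfs knob is always built, and Documentation/x86/tlb.txt documents it at /sys/kernel/debug/x86/tlb_single_page_flush_ceiling. A sketch of reading and retuning it from C, assuming debugfs is mounted at the usual location and the caller is root; the value 64 written here is only an example:

#include <stdio.h>

#define CEILING "/sys/kernel/debug/x86/tlb_single_page_flush_ceiling"

int main(void)
{
	unsigned long ceiling;
	FILE *f = fopen(CEILING, "r+");

	if (!f) {
		perror(CEILING);
		return 1;
	}
	if (fscanf(f, "%lu", &ceiling) == 1)
		printf("current ceiling: %lu pages\n", ceiling);

	rewind(f);		/* reposition the stream before writing */
	fprintf(f, "64\n");	/* e.g. allow up to 64 single-page flushes */
	fclose(f);
	return 0;
}

Per the write handler above, any non-negative integer is accepted; 0 effectively forces a full flush for every ranged invalidation.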
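The new trace_tlb_flush() calls (and the _rcuidle variant used in the idle path of leave_mm()) surface as a tlb_flush event in ftrace. A sketch of enabling the event and printing a few records, assuming the tracing directory sits at /sys/kernel/debug/tracing, as it does on kernels of this vintage, and the caller is root:

#include <stdio.h>

#define EVENT "/sys/kernel/debug/tracing/events/tlb/tlb_flush/enable"
#define PIPE  "/sys/kernel/debug/tracing/trace_pipe"

int main(void)
{
	char line[512];
	FILE *ev = fopen(EVENT, "w");
	FILE *tp;
	int n;

	if (!ev) {
		perror(EVENT);
		return 1;
	}
	fputs("1", ev);		/* enable the tlb:tlb_flush event */
	fclose(ev);

	tp = fopen(PIPE, "r");
	if (!tp) {
		perror(PIPE);
		return 1;
	}
	/* trace_pipe blocks until events arrive; print the first ten */
	for (n = 0; n < 10 && fgets(line, sizeof(line), tp); n++)
		fputs(line, stdout);
	fclose(tp);
	return 0;
}

Each record carries the reason (task switch, local or remote shootdown) and the page count, so the TLB_FLUSH_ALL-versus-ranged split introduced by this patch is directly observable.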