Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/cpu/common.c  |  2
-rw-r--r--  arch/x86/kernel/kvm.c         | 65
-rw-r--r--  arch/x86/kernel/paravirt.c    |  5
-rw-r--r--  arch/x86/kernel/process.c     |  2
4 files changed, 51 insertions, 23 deletions
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 52c9bfbbdb2a..4cdb123ff66a 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -445,7 +445,7 @@ static __always_inline void setup_pku(struct cpuinfo_x86 *c)
 	 * cpuid bit to be set.  We need to ensure that we
 	 * update that bit in this CPU's "cpu_info".
 	 */
-	get_cpu_cap(c);
+	set_cpu_cap(c, X86_FEATURE_OSPKE);
 }
 
 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index d817f255aed8..6efe0410fb72 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -425,7 +425,29 @@ static void __init sev_map_percpu_data(void)
 	}
 }
 
+static bool pv_tlb_flush_supported(void)
+{
+	return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
+		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
+		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
+}
+
+static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);
+
 #ifdef CONFIG_SMP
+
+static bool pv_ipi_supported(void)
+{
+	return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI);
+}
+
+static bool pv_sched_yield_supported(void)
+{
+	return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
+		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
+	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
+}
+
 #define KVM_IPI_CLUSTER_SIZE	(2 * BITS_PER_LONG)
 
 static void __send_ipi_mask(const struct cpumask *mask, int vector)
@@ -490,12 +512,12 @@ static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
 static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
 {
 	unsigned int this_cpu = smp_processor_id();
-	struct cpumask new_mask;
+	struct cpumask *new_mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
 	const struct cpumask *local_mask;
 
-	cpumask_copy(&new_mask, mask);
-	cpumask_clear_cpu(this_cpu, &new_mask);
-	local_mask = &new_mask;
+	cpumask_copy(new_mask, mask);
+	cpumask_clear_cpu(this_cpu, new_mask);
+	local_mask = new_mask;
 	__send_ipi_mask(local_mask, vector);
 }
 
@@ -575,7 +597,6 @@ static void __init kvm_apf_trap_init(void)
 	update_intr_gate(X86_TRAP_PF, async_page_fault);
 }
 
-static DEFINE_PER_CPU(cpumask_var_t, __pv_tlb_mask);
 
 static void kvm_flush_tlb_others(const struct cpumask *cpumask,
 			const struct flush_tlb_info *info)
@@ -583,7 +604,7 @@ static void kvm_flush_tlb_others(const struct cpumask *cpumask,
 	u8 state;
 	int cpu;
 	struct kvm_steal_time *src;
-	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_tlb_mask);
+	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
 
 	cpumask_copy(flushmask, cpumask);
 	/*
@@ -619,11 +640,10 @@ static void __init kvm_guest_init(void)
 		pv_ops.time.steal_clock = kvm_steal_clock;
 	}
 
-	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
-	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
-	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
+	if (pv_tlb_flush_supported()) {
 		pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
 		pv_ops.mmu.tlb_remove_table = tlb_remove_table;
+		pr_info("KVM setup pv remote TLB flush\n");
 	}
 
 	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
@@ -632,9 +652,7 @@ static void __init kvm_guest_init(void)
 #ifdef CONFIG_SMP
 	smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
 	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
-	if (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
-	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
-	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
+	if (pv_sched_yield_supported()) {
 		smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
 		pr_info("KVM setup pv sched yield\n");
 	}
@@ -700,7 +718,7 @@ static uint32_t __init kvm_detect(void)
 static void __init kvm_apic_init(void)
 {
 #if defined(CONFIG_SMP)
-	if (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI))
+	if (pv_ipi_supported())
 		kvm_setup_pv_ipi();
 #endif
 }
@@ -732,26 +750,31 @@ static __init int activate_jump_labels(void)
 }
 arch_initcall(activate_jump_labels);
 
-static __init int kvm_setup_pv_tlb_flush(void)
+static __init int kvm_alloc_cpumask(void)
 {
 	int cpu;
+	bool alloc = false;
 
 	if (!kvm_para_available() || nopv)
 		return 0;
 
-	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
-	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
-	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
+	if (pv_tlb_flush_supported())
+		alloc = true;
+
+#if defined(CONFIG_SMP)
+	if (pv_ipi_supported())
+		alloc = true;
+#endif
+
+	if (alloc)
 		for_each_possible_cpu(cpu) {
-			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
+			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
 				GFP_KERNEL, cpu_to_node(cpu));
 		}
-		pr_info("KVM setup pv remote TLB flush\n");
-	}
 
 	return 0;
 }
-arch_initcall(kvm_setup_pv_tlb_flush);
+arch_initcall(kvm_alloc_cpumask);
 
 
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 789f5e4f89de..c131ba4e70ef 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -30,6 +30,7 @@
 #include <asm/timer.h>
 #include <asm/special_insns.h>
 #include <asm/tlb.h>
+#include <asm/io_bitmap.h>
 
 /*
  * nop stub, which must not clobber anything *including the stack* to
@@ -341,6 +342,10 @@ struct paravirt_patch_template pv_ops = {
 	.cpu.iret		= native_iret,
 	.cpu.swapgs		= native_swapgs,
 
+#ifdef CONFIG_X86_IOPL_IOPERM
+	.cpu.update_io_bitmap	= native_tss_update_io_bitmap,
+#endif
+
 	.cpu.start_context_switch	= paravirt_nop,
 	.cpu.end_context_switch		= paravirt_nop,
 
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 839b5244e3b7..3053c85e0e42 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -374,7 +374,7 @@ static void tss_copy_io_bitmap(struct tss_struct *tss, struct io_bitmap *iobm)
 /**
  * tss_update_io_bitmap - Update I/O bitmap before exiting to usermode
  */
-void tss_update_io_bitmap(void)
+void native_tss_update_io_bitmap(void)
 {
 	struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
 	struct thread_struct *t = &current->thread;
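
The kvm.c hunks above converge on one pattern: the PV IPI path (kvm_send_ipi_mask_allbutself) and the PV TLB-flush path (kvm_flush_tlb_others) now share a single preallocated per-CPU cpumask, __pv_cpu_mask, instead of a TLB-flush-only mask plus an on-stack struct cpumask, and kvm_alloc_cpumask() allocates it once at boot whenever either feature is usable. The sketch below is illustrative only and is not part of the patch; the names example_mask, example_alloc_masks and example_send_to_others are hypothetical, while the per-CPU and cpumask APIs are the same kernel ones used in the hunks above.

/* Sketch only: the allocate-once, reuse-per-CPU cpumask pattern adopted by the diff. */
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/smp.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/init.h>

/* One scratch cpumask per CPU; a real allocation on CONFIG_CPUMASK_OFFSTACK kernels. */
static DEFINE_PER_CPU(cpumask_var_t, example_mask);

static int __init example_alloc_masks(void)
{
	int cpu;

	/* Allocate each CPU's mask zeroed, near that CPU's memory node. */
	for_each_possible_cpu(cpu) {
		if (!zalloc_cpumask_var_node(per_cpu_ptr(&example_mask, cpu),
					     GFP_KERNEL, cpu_to_node(cpu)))
			return -ENOMEM;
	}
	return 0;
}

static void example_send_to_others(const struct cpumask *mask)
{
	/* Reuse this CPU's preallocated mask instead of a stack copy. */
	struct cpumask *tmp = this_cpu_cpumask_var_ptr(example_mask);

	cpumask_copy(tmp, mask);
	cpumask_clear_cpu(smp_processor_id(), tmp);
	/* ... hand 'tmp' to the IPI or flush hypercall path ... */
}

The shared mask means the IPI path no longer keeps a struct cpumask on the stack, which grows with CONFIG_NR_CPUS. Separately, the process.c rename of tss_update_io_bitmap() to native_tss_update_io_bitmap() pairs with the new CONFIG_X86_IOPL_IOPERM entry in pv_ops (paravirt.c), which keeps the native implementation as the default so paravirt guests can override the I/O-bitmap update.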