diff options
Diffstat (limited to 'arch/x86/kernel/kvm.c')
| -rw-r--r-- | arch/x86/kernel/kvm.c | 19 | 
1 file changed, 12 insertions, 7 deletions
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 59abbdad7729..d77481ecb0d5 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -313,7 +313,7 @@ static void kvm_register_steal_time(void)  		return;  	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED)); -	pr_info("stealtime: cpu %d, msr %llx\n", cpu, +	pr_debug("stealtime: cpu %d, msr %llx\n", cpu,  		(unsigned long long) slow_virt_to_phys(st));  } @@ -350,7 +350,7 @@ static void kvm_guest_cpu_init(void)  		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);  		__this_cpu_write(apf_reason.enabled, 1); -		pr_info("setup async PF for cpu %d\n", smp_processor_id()); +		pr_debug("setup async PF for cpu %d\n", smp_processor_id());  	}  	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) { @@ -376,7 +376,7 @@ static void kvm_pv_disable_apf(void)  	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);  	__this_cpu_write(apf_reason.enabled, 0); -	pr_info("disable async PF for cpu %d\n", smp_processor_id()); +	pr_debug("disable async PF for cpu %d\n", smp_processor_id());  }  static void kvm_disable_steal_time(void) @@ -462,19 +462,24 @@ static bool pv_tlb_flush_supported(void)  {  	return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&  		!kvm_para_has_hint(KVM_HINTS_REALTIME) && -		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)); +		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME) && +		!boot_cpu_has(X86_FEATURE_MWAIT) && +		(num_possible_cpus() != 1));  }  static bool pv_ipi_supported(void)  { -	return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI); +	return (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI) && +	       (num_possible_cpus() != 1));  }  static bool pv_sched_yield_supported(void)  {  	return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&  		!kvm_para_has_hint(KVM_HINTS_REALTIME) && -	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)); +	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME) && +	    !boot_cpu_has(X86_FEATURE_MWAIT) && +	    (num_possible_cpus() != 1));  }  #define KVM_IPI_CLUSTER_SIZE	(2 * 
BITS_PER_LONG) @@ -619,7 +624,7 @@ static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)  	/* Make sure other vCPUs get a chance to run if they need to. */  	for_each_cpu(cpu, mask) { -		if (vcpu_is_preempted(cpu)) { +		if (!idle_cpu(cpu) && vcpu_is_preempted(cpu)) {  			kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu));  			break;  		}  |