diff options
Diffstat (limited to 'arch/x86/kvm/vmx.c')
| -rw-r--r-- | arch/x86/kvm/vmx.c | 26 | 
1 file changed, 20 insertions, 6 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 3dec126aa302..051dab74e4e9 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -51,6 +51,7 @@  #include <asm/apic.h>  #include <asm/irq_remapping.h>  #include <asm/mmu_context.h> +#include <asm/microcode.h>  #include <asm/nospec-branch.h>  #include "trace.h" @@ -3226,6 +3227,11 @@ static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu,  	return !(val & ~valid_bits);  } +static int vmx_get_msr_feature(struct kvm_msr_entry *msr) +{ +	return 1; +} +  /*   * Reads an msr value (of 'msr_index') into 'pdata'.   * Returns 0 on success, non-0 otherwise. @@ -4485,7 +4491,8 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)  		vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,  			      SECONDARY_EXEC_DESC);  		hw_cr4 &= ~X86_CR4_UMIP; -	} else +	} else if (!is_guest_mode(vcpu) || +	           !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC))  		vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,  				SECONDARY_EXEC_DESC); @@ -5765,6 +5772,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)  	vmx->rmode.vm86_active = 0;  	vmx->spec_ctrl = 0; +	vcpu->arch.microcode_version = 0x100000000ULL;  	vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();  	kvm_set_cr8(vcpu, 0); @@ -9452,7 +9460,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)  	 * being speculatively taken.  	 */  	if (vmx->spec_ctrl) -		wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl); +		native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);  	vmx->__launched = vmx->loaded_vmcs->launched;  	asm( @@ -9587,11 +9595,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)  	 * If the L02 MSR bitmap does not intercept the MSR, then we need to  	 * save it.  	 
*/ -	if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)) -		rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl); +	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) +		vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);  	if (vmx->spec_ctrl) -		wrmsrl(MSR_IA32_SPEC_CTRL, 0); +		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);  	/* Eliminate branch target predictions from guest mode */  	vmexit_fill_RSB(); @@ -11199,7 +11207,12 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)  	if (ret)  		return ret; -	if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) +	/* +	 * If we're entering a halted L2 vcpu and the L2 vcpu won't be woken +	 * by event injection, halt vcpu. +	 */ +	if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) && +	    !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK))  		return kvm_vcpu_halt(vcpu);  	vmx->nested.nested_run_pending = 1; @@ -12290,6 +12303,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {  	.vcpu_put = vmx_vcpu_put,  	.update_bp_intercept = update_exception_bitmap, +	.get_msr_feature = vmx_get_msr_feature,  	.get_msr = vmx_get_msr,  	.set_msr = vmx_set_msr,  	.get_segment_base = vmx_get_segment_base,  |