Diffstat (limited to 'arch/x86/kvm/vmx/nested.c')
-rw-r--r--	arch/x86/kvm/vmx/nested.c	65
1 file changed, 46 insertions, 19 deletions
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 9c941535f78c..f235f77cbc03 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -245,7 +245,8 @@ static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
 	src = &prev->host_state;
 	dest = &vmx->loaded_vmcs->host_state;
 
-	vmx_set_host_fs_gs(dest, src->fs_sel, src->gs_sel, src->fs_base, src->gs_base);
+	vmx_set_vmcs_host_state(dest, src->cr3, src->fs_sel, src->gs_sel,
+				src->fs_base, src->gs_base);
 	dest->ldt_sel = src->ldt_sel;
 #ifdef CONFIG_X86_64
 	dest->ds_sel = src->ds_sel;
@@ -269,7 +270,13 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
 	vmx_sync_vmcs_host_state(vmx, prev);
 	put_cpu();
 
-	vmx_register_cache_reset(vcpu);
+	vcpu->arch.regs_avail = ~VMX_REGS_LAZY_LOAD_SET;
+
+	/*
+	 * All lazily updated registers will be reloaded from VMCS12 on both
+	 * vmentry and vmexit.
+	 */
+	vcpu->arch.regs_dirty = 0;
 }
 
 /*
@@ -391,9 +398,11 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
 
 static void nested_ept_new_eptp(struct kvm_vcpu *vcpu)
 {
-	kvm_init_shadow_ept_mmu(vcpu,
-				to_vmx(vcpu)->nested.msrs.ept_caps &
-				VMX_EPT_EXECUTE_ONLY_BIT,
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	bool execonly = vmx->nested.msrs.ept_caps & VMX_EPT_EXECUTE_ONLY_BIT;
+	int ept_lpage_level = ept_caps_to_lpage_level(vmx->nested.msrs.ept_caps);
+
+	kvm_init_shadow_ept_mmu(vcpu, execonly, ept_lpage_level,
 				nested_ept_ad_enabled(vcpu),
 				nested_ept_get_eptp(vcpu));
 }
@@ -591,6 +600,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
 	int msr;
 	unsigned long *msr_bitmap_l1;
 	unsigned long *msr_bitmap_l0 = vmx->nested.vmcs02.msr_bitmap;
+	struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
 	struct kvm_host_map *map = &vmx->nested.msr_bitmap_map;
 
 	/* Nothing to do if the MSR bitmap is not in use.  */
@@ -598,6 +608,19 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
 	    !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
 		return false;
 
+	/*
+	 * MSR bitmap update can be skipped when:
+	 * - MSR bitmap for L1 hasn't changed.
+	 * - Nested hypervisor (L1) is attempting to launch the same L2 as
+	 *   before.
+	 * - Nested hypervisor (L1) has enabled 'Enlightened MSR Bitmap' feature
+	 *   and tells KVM (L0) there were no changes in MSR bitmap for L2.
+	 */
+	if (!vmx->nested.force_msr_bitmap_recalc && evmcs &&
+	    evmcs->hv_enlightenments_control.msr_bitmap &&
+	    evmcs->hv_clean_fields & HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP)
+		return true;
+
 	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map))
 		return false;
 
@@ -664,6 +687,8 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
 
 	kvm_vcpu_unmap(vcpu, &vmx->nested.msr_bitmap_map, false);
 
+	vmx->nested.force_msr_bitmap_recalc = false;
+
 	return true;
 }
 
@@ -1095,7 +1120,7 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
 	 * must not be dereferenced.
 	 */
 	if (reload_pdptrs && !nested_ept && is_pae_paging(vcpu) &&
-	    CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))) {
+	    CC(!load_pdptrs(vcpu, cr3))) {
 		*entry_failure_code = ENTRY_FAIL_PDPTE;
 		return -EINVAL;
 	}
@@ -1104,7 +1129,7 @@
 		kvm_mmu_new_pgd(vcpu, cr3);
 
 	vcpu->arch.cr3 = cr3;
-	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
+	kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
 
 	/* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
 	kvm_init_mmu(vcpu);
@@ -2021,10 +2046,13 @@ static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
 	 * Clean fields data can't be used on VMLAUNCH and when we switch
 	 * between different L2 guests as KVM keeps a single VMCS12 per L1.
 	 */
-	if (from_launch || evmcs_gpa_changed)
+	if (from_launch || evmcs_gpa_changed) {
 		vmx->nested.hv_evmcs->hv_clean_fields &=
 			~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
 
+		vmx->nested.force_msr_bitmap_recalc = true;
+	}
+
 	return EVMPTRLD_SUCCEEDED;
 }
 
@@ -3027,7 +3055,7 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
 static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	unsigned long cr3, cr4;
+	unsigned long cr4;
 	bool vm_fail;
 
 	if (!nested_early_check)
@@ -3050,12 +3078,6 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
 	 */
 	vmcs_writel(GUEST_RFLAGS, 0);
 
-	cr3 = __get_current_cr3_fast();
-	if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
-		vmcs_writel(HOST_CR3, cr3);
-		vmx->loaded_vmcs->host_state.cr3 = cr3;
-	}
-
 	cr4 = cr4_read_shadow();
 	if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
 		vmcs_writel(HOST_CR4, cr4);
@@ -3145,7 +3167,7 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 		 * the guest CR3 might be restored prior to setting the nested
 		 * state which can lead to a load of wrong PDPTRs.
 		 */
-		if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3)))
+		if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3)))
			return false;
 	}
 
@@ -3504,10 +3526,13 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	if (evmptrld_status == EVMPTRLD_ERROR) {
 		kvm_queue_exception(vcpu, UD_VECTOR);
 		return 1;
-	} else if (CC(evmptrld_status == EVMPTRLD_VMFAIL)) {
-		return nested_vmx_failInvalid(vcpu);
 	}
 
+	kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
+
+	if (CC(evmptrld_status == EVMPTRLD_VMFAIL))
+		return nested_vmx_failInvalid(vcpu);
+
 	if (CC(!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) &&
 	       vmx->nested.current_vmptr == INVALID_GPA))
 		return nested_vmx_failInvalid(vcpu);
@@ -3603,7 +3628,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 		    !(nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING) &&
 		      (vmcs12->guest_rflags & X86_EFLAGS_IF))) {
 			vmx->nested.nested_run_pending = 0;
-			return kvm_vcpu_halt(vcpu);
+			return kvm_emulate_halt_noskip(vcpu);
 		}
 		break;
 	case GUEST_ACTIVITY_WAIT_SIPI:
@@ -5258,6 +5283,7 @@ static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
 		vmx->nested.need_vmcs12_to_shadow_sync = true;
 	}
 	vmx->nested.dirty_vmcs12 = true;
+	vmx->nested.force_msr_bitmap_recalc = true;
 }
 
 /* Emulate the VMPTRLD instruction */
@@ -6393,6 +6419,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
 		goto error_guest_mode;
 
 	vmx->nested.dirty_vmcs12 = true;
+	vmx->nested.force_msr_bitmap_recalc = true;
 	ret = nested_vmx_enter_non_root_mode(vcpu, false);
 	if (ret)
 		goto error_guest_mode;