Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--	arch/x86/kvm/x86.c | 64 +++++++++++++++++++++++++++++++---------------------------------
1 file changed, 31 insertions(+), 33 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 099b851dabaf..a0d1fc80ac5a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -800,7 +800,7 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 }
 EXPORT_SYMBOL_GPL(kvm_lmsw);
 
-static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
+void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
 {
 	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
 			!vcpu->guest_xcr0_loaded) {
@@ -810,8 +810,9 @@ static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
 		vcpu->guest_xcr0_loaded = 1;
 	}
 }
+EXPORT_SYMBOL_GPL(kvm_load_guest_xcr0);
 
-static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
+void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
 {
 	if (vcpu->guest_xcr0_loaded) {
 		if (vcpu->arch.xcr0 != host_xcr0)
@@ -819,6 +820,7 @@ static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
 		vcpu->guest_xcr0_loaded = 0;
 	}
 }
+EXPORT_SYMBOL_GPL(kvm_put_guest_xcr0);
 
 static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 {
@@ -3093,7 +3095,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		break;
 	case KVM_CAP_NESTED_STATE:
 		r = kvm_x86_ops->get_nested_state ?
-			kvm_x86_ops->get_nested_state(NULL, 0, 0) : 0;
+			kvm_x86_ops->get_nested_state(NULL, NULL, 0) : 0;
 		break;
 	default:
 		break;
@@ -3528,7 +3530,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 	memset(&events->reserved, 0, sizeof(events->reserved));
 }
 
-static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags);
+static void kvm_smm_changed(struct kvm_vcpu *vcpu);
 
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 					      struct kvm_vcpu_events *events)
@@ -3588,12 +3590,13 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 		vcpu->arch.apic->sipi_vector = events->sipi_vector;
 
 	if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
-		u32 hflags = vcpu->arch.hflags;
-		if (events->smi.smm)
-			hflags |= HF_SMM_MASK;
-		else
-			hflags &= ~HF_SMM_MASK;
-		kvm_set_hflags(vcpu, hflags);
+		if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
+			if (events->smi.smm)
+				vcpu->arch.hflags |= HF_SMM_MASK;
+			else
+				vcpu->arch.hflags &= ~HF_SMM_MASK;
+			kvm_smm_changed(vcpu);
+		}
 
 		vcpu->arch.smi_pending = events->smi.pending;
 
@@ -4270,7 +4273,7 @@ static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
 }
 
 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
-					  u32 kvm_nr_mmu_pages)
+					 unsigned long kvm_nr_mmu_pages)
 {
 	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
 		return -EINVAL;
@@ -4284,7 +4287,7 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
 	return 0;
 }
 
-static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
+static unsigned long kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
 {
 	return kvm->arch.n_max_mmu_pages;
 }
@@ -5958,12 +5961,18 @@ static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
 
 static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
 {
-	kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags);
+	emul_to_vcpu(ctxt)->arch.hflags = emul_flags;
+}
+
+static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
+				  const char *smstate)
+{
+	return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smstate);
 }
 
-static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt, u64 smbase)
+static void emulator_post_leave_smm(struct x86_emulate_ctxt *ctxt)
 {
-	return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smbase);
+	kvm_smm_changed(emul_to_vcpu(ctxt));
 }
 
 static const struct x86_emulate_ops emulate_ops = {
@@ -6006,6 +6015,7 @@ static const struct x86_emulate_ops emulate_ops = {
 	.get_hflags          = emulator_get_hflags,
 	.set_hflags          = emulator_set_hflags,
 	.pre_leave_smm       = emulator_pre_leave_smm,
+	.post_leave_smm      = emulator_post_leave_smm,
 };
 
 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
@@ -6247,16 +6257,6 @@ static void kvm_smm_changed(struct kvm_vcpu *vcpu)
 	kvm_mmu_reset_context(vcpu);
 }
 
-static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags)
-{
-	unsigned changed = vcpu->arch.hflags ^ emul_flags;
-
-	vcpu->arch.hflags = emul_flags;
-
-	if (changed & HF_SMM_MASK)
-		kvm_smm_changed(vcpu);
-}
-
 static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
 				unsigned long *db)
 {
@@ -7441,9 +7441,9 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
 	put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
 }
 
+#ifdef CONFIG_X86_64
 static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 {
-#ifdef CONFIG_X86_64
 	struct desc_ptr dt;
 	struct kvm_segment seg;
 	unsigned long val;
@@ -7493,10 +7493,8 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 
 	for (i = 0; i < 6; i++)
 		enter_smm_save_seg_64(vcpu, buf, i);
-#else
-	WARN_ON_ONCE(1);
-#endif
 }
+#endif
 
 static void enter_smm(struct kvm_vcpu *vcpu)
 {
@@ -7507,9 +7505,11 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 	trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
 	memset(buf, 0, 512);
+#ifdef CONFIG_X86_64
 	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
 		enter_smm_save_state_64(vcpu, buf);
 	else
+#endif
 		enter_smm_save_state_32(vcpu, buf);
 
 	/*
@@ -7567,8 +7567,10 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 	kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
 	kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);
 
+#ifdef CONFIG_X86_64
 	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
 		kvm_x86_ops->set_efer(vcpu, 0);
+#endif
 
 	kvm_update_cpuid(vcpu);
 	kvm_mmu_reset_context(vcpu);
@@ -7865,8 +7867,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		goto cancel_injection;
 	}
 
-	kvm_load_guest_xcr0(vcpu);
-
 	if (req_immediate_exit) {
 		kvm_make_request(KVM_REQ_EVENT, vcpu);
 		kvm_x86_ops->request_immediate_exit(vcpu);
@@ -7919,8 +7919,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	smp_wmb();
 
-	kvm_put_guest_xcr0(vcpu);
-
 	kvm_before_interrupt(vcpu);
 	kvm_x86_ops->handle_external_intr(vcpu);
 	kvm_after_interrupt(vcpu);
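
A note on the XCR0 hunks: the diff removes the kvm_load_guest_xcr0()/kvm_put_guest_xcr0() calls from the common vcpu_enter_guest() path and exports both helpers instead, which implies the vendor modules now switch XCR0 themselves, with interrupts disabled around the actual VM entry. A minimal sketch of that calling pattern; example_vendor_vcpu_run() and vendor_vmenter() are hypothetical placeholders, not functions from this diff:

/*
 * Hedged sketch of the expected caller (vmx.c/svm.c style): keep the
 * guest's XCR0 loaded only across the low-level VM entry/exit, never
 * while host interrupt handlers might run XSAVE-based code.
 */
static void example_vendor_vcpu_run(struct kvm_vcpu *vcpu)
{
	kvm_load_guest_xcr0(vcpu);	/* xsetbv to vcpu->arch.xcr0 if it differs from host_xcr0 */

	vendor_vmenter(vcpu);		/* placeholder for VMLAUNCH/VMRESUME or VMRUN */

	kvm_put_guest_xcr0(vcpu);	/* restore host_xcr0 before interrupts are re-enabled */
}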
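
The SMM hunks change an internal contract in a similar way: ->pre_leave_smm() now receives the whole 512-byte SMRAM save area (const char *smstate) rather than just a u64 smbase, ->set_hflags() becomes a plain assignment, and the new ->post_leave_smm() hook is how the emulator tells x86.c to run kvm_smm_changed() after RSM. A rough, hypothetical outline of an RSM handler built on these ops; read_smram() and restore_state() are placeholders, and the real handler lives in emulate.c, outside this diff:

static int example_rsm(struct x86_emulate_ctxt *ctxt)
{
	char smstate[512];
	int ret;

	read_smram(ctxt, smstate);	/* placeholder: fetch the save area once */

	/* vendor code sees the raw buffer before any state is restored */
	ret = ctxt->ops->pre_leave_smm(ctxt, smstate);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* clear the SMM flag first, then reload state from the buffer */
	ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) & ~X86EMUL_SMM_MASK);
	restore_state(ctxt, smstate);	/* placeholder */

	/* one notification per real SMM exit -> kvm_smm_changed() */
	ctxt->ops->post_leave_smm(ctxt);
	return X86EMUL_CONTINUE;
}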