author		Sean Christopherson <[email protected]>	2021-06-09 11:56:15 -0700
committer	Paolo Bonzini <[email protected]>	2021-06-17 13:09:34 -0400
commit		dc87275f47332be922d4eb299595523cc3a97479 (patch)
tree		7294f42481a965f9d47b6609da37b53ff72d3508
parent		fa75e08bbe4f8ea609f61bbb6c04b3bb2b38c793 (diff)
KVM: x86: Move (most) SMM hflags modifications into kvm_smm_changed()
Move the core of SMM hflags modifications into kvm_smm_changed() and use
kvm_smm_changed() in enter_smm(). Clear HF_SMM_INSIDE_NMI_MASK for
leaving SMM but do not set it for entering SMM. If the vCPU is executing
outside of SMM, the flag should unequivocally be cleared, e.g. this
technically fixes a benign bug where the flag could be left set after
KVM_SET_VCPU_EVENTS, but the reverse is not true as NMI blocking depends
on pre-SMM state or userspace input.
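
That asymmetry shows up directly in the reworked helper; roughly (a trimmed sketch based on the diff below, with tracing and the rest of the exit path omitted):

	static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
	{
		if (entering_smm) {
			vcpu->arch.hflags |= HF_SMM_MASK;
		} else {
			/* Leaving SMM: SMM-internal NMI blocking no longer applies. */
			vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK);
		}
	}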
Note, this adds an extra kvm_mmu_reset_context() to enter_smm(). The
extra/early reset isn't strictly necessary, and in a way can never be
necessary since the vCPU/MMU context is in a half-baked state until the
final context reset at the end of the function. But, enter_smm() is not
a hot path, and exploding on an invalid root_hpa is probably better than
having a stale SMM flag in the MMU role; it's at least no worse.
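
Concretely, the ordering in enter_smm() after this patch looks roughly like this (a trimmed sketch based on the hunk below; the early reset presumably comes from kvm_smm_changed() itself ending in a kvm_mmu_reset_context(), per the note above):

	static_call(kvm_x86_pre_enter_smm)(vcpu, buf);

	kvm_smm_changed(vcpu, true);	/* sets HF_SMM_MASK and resets the MMU context early */
	kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
	...
	kvm_mmu_reset_context(vcpu);	/* final reset at the end of the function */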
Signed-off-by: Sean Christopherson <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
-rw-r--r--	arch/x86/kvm/x86.c	24
1 file changed, 11 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 774f2e7bedae..57efc3a49753 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4532,7 +4532,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 	memset(&events->reserved, 0, sizeof(events->reserved));
 }
 
-static void kvm_smm_changed(struct kvm_vcpu *vcpu);
+static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm);
 
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 					      struct kvm_vcpu_events *events)
@@ -4592,13 +4592,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 		vcpu->arch.apic->sipi_vector = events->sipi_vector;
 
 	if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
-		if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
-			if (events->smi.smm)
-				vcpu->arch.hflags |= HF_SMM_MASK;
-			else
-				vcpu->arch.hflags &= ~HF_SMM_MASK;
-			kvm_smm_changed(vcpu);
-		}
+		if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm)
+			kvm_smm_changed(vcpu, events->smi.smm);
 
 		vcpu->arch.smi_pending = events->smi.pending;
 
@@ -7218,8 +7213,7 @@ static void emulator_exiting_smm(struct x86_emulate_ctxt *ctxt)
 {
 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
 
-	vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK);
-	kvm_smm_changed(vcpu);
+	kvm_smm_changed(vcpu, false);
 }
 
 static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
@@ -7548,9 +7542,13 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
 static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
 static int complete_emulated_pio(struct kvm_vcpu *vcpu);
 
-static void kvm_smm_changed(struct kvm_vcpu *vcpu)
+static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
 {
-	if (!(vcpu->arch.hflags & HF_SMM_MASK)) {
+	if (entering_smm) {
+		vcpu->arch.hflags |= HF_SMM_MASK;
+	} else {
+		vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK);
+
 		/* This is a good place to trace that we are exiting SMM. */
 		trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false);
 
@@ -9022,7 +9020,7 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 	 */
 	static_call(kvm_x86_pre_enter_smm)(vcpu, buf);
 
-	vcpu->arch.hflags |= HF_SMM_MASK;
+	kvm_smm_changed(vcpu, true);
 	kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
 
 	if (static_call(kvm_x86_get_nmi_mask)(vcpu))