author     Sean Christopherson <seanjc@google.com>  2021-05-04 10:17:28 -0700
committer  Paolo Bonzini <pbonzini@redhat.com>      2021-05-07 06:06:18 -0400
commit     ee9d22e08d1341692a43926e5e1d84c90a5dac1d (patch)
tree       20952cdc574adad4b2af71d025903549c6e17139 /arch/x86/kvm/vmx/vmx.h
parent     b6194b94a2ca4affce5aab1bbf773a977ad73671 (diff)
KVM: VMX: Use flag to indicate "active" uret MSRs instead of sorting list
Explicitly flag a uret MSR as needing to be loaded into hardware instead of resorting the list of "active" MSRs and tracking how many MSRs in total need to be loaded. The only benefit to sorting the list is that the loop to load MSRs during vmx_prepare_switch_to_guest() doesn't need to iterate over all supported uret MSRs, only those that are active. But that is a pointless optimization, as the most common case, running a 64-bit guest, will load the vast majority of MSRs. Not to mention that a single WRMSR is far more expensive than iterating over the list.

Providing a stable list order obviates the need to track a given MSR's "slot" in the per-CPU list of user return MSRs; all lists simply use the same ordering. Future patches will take advantage of the stable order to further simplify the related code.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210504171734.1434054-10-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86/kvm/vmx/vmx.h')
-rw-r--r--  arch/x86/kvm/vmx/vmx.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index d71ed8b425c5..16e4e457ba23 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -36,7 +36,7 @@ struct vmx_msrs {
 };
 
 struct vmx_uret_msr {
-	unsigned int slot; /* The MSR's slot in kvm_user_return_msrs. */
+	bool load_into_hardware;
 	u64 data;
 	u64 mask;
 };
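
For context, here is a minimal sketch (not part of this diff) of how the new load_into_hardware flag would be consumed by the MSR-load loop in vmx_prepare_switch_to_guest() that the commit message describes. The loop bound MAX_NR_USER_RETURN_MSRS and the kvm_set_user_return_msr() helper are assumed from the surrounding KVM uret MSR code; the exact code in the corresponding vmx.c hunk may differ:

	/*
	 * Illustrative sketch: walk every supported uret MSR and skip the
	 * entries not flagged for loading, instead of relying on a sorted
	 * "active" prefix of the list.  Because all lists share the same
	 * stable order, the loop index doubles as the slot in the per-CPU
	 * user return MSR list.
	 */
	if (!vmx->guest_uret_msrs_loaded) {
		vmx->guest_uret_msrs_loaded = true;
		for (i = 0; i < MAX_NR_USER_RETURN_MSRS; ++i) {
			if (!vmx->guest_uret_msrs[i].load_into_hardware)
				continue;

			kvm_set_user_return_msr(i, vmx->guest_uret_msrs[i].data,
						vmx->guest_uret_msrs[i].mask);
		}
	}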