From 12aad9164763bb4890277d3b2a4664f443a48e3f Mon Sep 17 00:00:00 2001 From: Mathias Krause Date: Fri, 17 Feb 2023 20:33:35 +0100 Subject: KVM: x86: Shrink struct kvm_pmu Move the 'version' member to the beginning of the structure to reuse an existing hole instead of introducing another one. This allows us to save 8 bytes for 64 bit builds. Signed-off-by: Mathias Krause Link: https://lore.kernel.org/r/20230217193336.15278-2-minipli@grsecurity.net Signed-off-by: Sean Christopherson --- arch/x86/include/asm/kvm_host.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index a45de1118a42..a440ceae06fb 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -515,6 +515,7 @@ struct kvm_pmc { #define MSR_ARCH_PERFMON_FIXED_CTR_MAX (MSR_ARCH_PERFMON_FIXED_CTR0 + KVM_PMC_MAX_FIXED - 1) #define KVM_AMD_PMC_MAX_GENERIC 6 struct kvm_pmu { + u8 version; unsigned nr_arch_gp_counters; unsigned nr_arch_fixed_counters; unsigned available_event_types; @@ -527,7 +528,6 @@ struct kvm_pmu { u64 global_ovf_ctrl_mask; u64 reserved_bits; u64 raw_event_mask; - u8 version; struct kvm_pmc gp_counters[KVM_INTEL_PMC_MAX_GENERIC]; struct kvm_pmc fixed_counters[KVM_PMC_MAX_FIXED]; struct irq_work irq_work; -- cgit From 7e768ce8278bafe43e2a4771a82b61856190a3fc Mon Sep 17 00:00:00 2001 From: Like Xu Date: Tue, 4 Apr 2023 15:17:59 +0800 Subject: KVM: x86/pmu: Zero out pmu->all_valid_pmc_idx each time it's refreshed The kvm_pmu_refresh() may be called repeatedly (e.g. configure guest CPUID repeatedly or update MSR_IA32_PERF_CAPABILITIES) and each call will use the last pmu->all_valid_pmc_idx value, with the residual bits introducing additional overhead later in the vPMU emulation. Fixes: b35e5548b411 ("KVM: x86/vPMU: Add lazy mechanism to release perf_event per vPMC") Suggested-by: Sean Christopherson Signed-off-by: Like Xu Link: https://lore.kernel.org/r/20230404071759.75376-1-likexu@tencent.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/pmu.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 612e6c70ce2e..29492c2a0c82 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -589,6 +589,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) */ void kvm_pmu_refresh(struct kvm_vcpu *vcpu) { + bitmap_zero(vcpu_to_pmu(vcpu)->all_valid_pmc_idx, X86_PMC_IDX_MAX); static_call(kvm_x86_pmu_refresh)(vcpu); } -- cgit From 098f4c061ea10b777033b71c10bd9fd706820ee9 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Sat, 28 Jan 2023 00:14:27 +0000 Subject: KVM: x86/pmu: Disallow legacy LBRs if architectural LBRs are available Disallow enabling LBR support if the CPU supports architectural LBRs. Traditional LBR support is absent on CPU models that have architectural LBRs, and KVM doesn't yet support arch LBRs, i.e. KVM will pass through non-existent MSRs if userspace enables LBRs for the guest. 
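(Illustration, not part of the patch.) Legacy LBR support is advertised to userspace through the LBR-format field, bits 5:0, of the PERF_CAPABILITIES feature MSR. A sketch of how a VMM can probe it, using the kvm_get_feature_msr() selftest helper that appears later in this log; the 0x3f mask mirrors the test's PMU_CAP_LBR_FMT:

	uint64_t caps = kvm_get_feature_msr(MSR_IA32_PERF_CAPABILITIES);
	/* Bits 5:0 hold the LBR format; with this fix it reads 0 on hosts with architectural LBRs. */
	bool has_legacy_lbrs = (caps & 0x3f) != 0;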
Cc: stable@vger.kernel.org Cc: Yang Weijiang Cc: Like Xu Reported-by: Paolo Bonzini Fixes: be635e34c284 ("KVM: vmx/pmu: Expose LBR_FMT in the MSR_IA32_PERF_CAPABILITIES") Tested-by: Like Xu Link: https://lore.kernel.org/r/20230128001427.2548858-1-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/vmx/vmx.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index d7bf14abdba1..c18f74899f01 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -7793,9 +7793,11 @@ static u64 vmx_get_perf_capabilities(void) if (boot_cpu_has(X86_FEATURE_PDCM)) rdmsrl(MSR_IA32_PERF_CAPABILITIES, host_perf_cap); - x86_perf_get_lbr(&lbr); - if (lbr.nr) - perf_cap |= host_perf_cap & PMU_CAP_LBR_FMT; + if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR)) { + x86_perf_get_lbr(&lbr); + if (lbr.nr) + perf_cap |= host_perf_cap & PMU_CAP_LBR_FMT; + } if (vmx_pebs_supported()) { perf_cap |= host_perf_cap & PERF_CAP_PEBS_MASK; -- cgit From b1932c5c19ddcbe9140b0583a0931b620c21ca02 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 10 Mar 2023 16:45:58 -0800 Subject: KVM: x86: Rename kvm_init_msr_list() to clarify it inits multiple lists Rename kvm_init_msr_list() to kvm_init_msr_lists() to clarify that it initializes multiple lists: MSRs to save, emulated MSRs, and feature MSRs. No functional change intended. Reviewed-by: Xiaoyao Li Link: https://lore.kernel.org/r/20230311004618.920745-2-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/x86.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 237c483b1230..087497aebded 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -7084,7 +7084,7 @@ static void kvm_probe_msr_to_save(u32 msr_index) msrs_to_save[num_msrs_to_save++] = msr_index; } -static void kvm_init_msr_list(void) +static void kvm_init_msr_lists(void) { unsigned i; @@ -9452,7 +9452,7 @@ static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops) kvm_caps.max_guest_tsc_khz = max; } kvm_caps.default_tsc_scaling_ratio = 1ULL << kvm_caps.tsc_scaling_ratio_frac_bits; - kvm_init_msr_list(); + kvm_init_msr_lists(); return 0; out_unwind_ops: -- cgit From fb3146b4dc3bc6d0c0402a75f21d628eccf9bf8c Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 10 Mar 2023 16:45:59 -0800 Subject: KVM: x86: Add a helper to query whether or not a vCPU has ever run Add a helper to query if a vCPU has run so that KVM doesn't have to open code the check on last_vmentry_cpu being set to a magic value. No functional change intended. Suggested-by: Xiaoyao Li Cc: Like Xu Reviewed-by: Xiaoyao Li Link: https://lore.kernel.org/r/20230311004618.920745-3-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/cpuid.c | 2 +- arch/x86/kvm/mmu/mmu.c | 2 +- arch/x86/kvm/x86.h | 5 +++++ 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 9583a110cf5f..b736ddb42088 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -420,7 +420,7 @@ static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2, * KVM_SET_CPUID{,2} again. To support this legacy behavior, check * whether the supplied CPUID data is equal to what's already set. 
*/ - if (vcpu->arch.last_vmentry_cpu != -1) { + if (kvm_vcpu_has_run(vcpu)) { r = kvm_cpuid_check_equal(vcpu, e2, nent); if (r) return r; diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 144c5a01cd77..8ced48797d59 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -5393,7 +5393,7 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu) * Changing guest CPUID after KVM_RUN is forbidden, see the comment in * kvm_arch_vcpu_ioctl(). */ - KVM_BUG_ON(vcpu->arch.last_vmentry_cpu != -1, vcpu->kvm); + KVM_BUG_ON(kvm_vcpu_has_run(vcpu), vcpu->kvm); } void kvm_mmu_reset_context(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index a8167b47b8c8..754190af1791 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -83,6 +83,11 @@ static inline unsigned int __shrink_ple_window(unsigned int val, void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu); int kvm_check_nested_events(struct kvm_vcpu *vcpu); +static inline bool kvm_vcpu_has_run(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.last_vmentry_cpu != -1; +} + static inline bool kvm_is_exception_pending(struct kvm_vcpu *vcpu) { return vcpu->arch.exception.pending || -- cgit From 5757f5b9562244e4b63f65576c16ca9eb21f81d2 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 10 Mar 2023 16:46:00 -0800 Subject: KVM: x86: Add macros to track first...last VMX feature MSRs Add macros to track the range of VMX feature MSRs that are emulated by KVM to reduce the maintenance cost of extending the set of emulated MSRs. Note, KVM doesn't necessarily emulate all known/consumed VMX MSRs, e.g. PROCBASED_CTLS3 is consumed by KVM to enable IPI virtualization, but is not emulated as KVM doesn't emulate/virtualize IPI virtualization for nested guests. No functional change intended. Reviewed-by: Xiaoyao Li Link: https://lore.kernel.org/r/20230311004618.920745-4-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/svm.c | 2 +- arch/x86/kvm/vmx/vmx.c | 8 ++++---- arch/x86/kvm/x86.h | 8 ++++++++ 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 70183d2271b5..7584eb85410b 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4124,7 +4124,7 @@ static bool svm_has_emulated_msr(struct kvm *kvm, u32 index) { switch (index) { case MSR_IA32_MCG_EXT_CTL: - case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: + case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR: return false; case MSR_IA32_SMBASE: if (!IS_ENABLED(CONFIG_KVM_SMM)) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index c18f74899f01..e89340dfa322 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1945,7 +1945,7 @@ static inline bool is_vmx_feature_control_msr_valid(struct vcpu_vmx *vmx, static int vmx_get_msr_feature(struct kvm_msr_entry *msr) { switch (msr->index) { - case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: + case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR: if (!nested) return 1; return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data); @@ -2030,7 +2030,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = to_vmx(vcpu)->msr_ia32_sgxlepubkeyhash [msr_info->index - MSR_IA32_SGXLEPUBKEYHASH0]; break; - case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: + case KVM_FIRST_EMULATED_VMX_MSR ... 
KVM_LAST_EMULATED_VMX_MSR: if (!nested_vmx_allowed(vcpu)) return 1; if (vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index, @@ -2384,7 +2384,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) vmx->msr_ia32_sgxlepubkeyhash [msr_index - MSR_IA32_SGXLEPUBKEYHASH0] = data; break; - case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: + case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR: if (!msr_info->host_initiated) return 1; /* they are read-only */ if (!nested_vmx_allowed(vcpu)) @@ -6974,7 +6974,7 @@ static bool vmx_has_emulated_msr(struct kvm *kvm, u32 index) * real mode. */ return enable_unrestricted_guest || emulate_invalid_guest_state; - case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: + case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR: return nested; case MSR_AMD64_VIRT_SPEC_CTRL: case MSR_AMD64_TSC_RATIO: diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 754190af1791..4bc483d082ee 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -40,6 +40,14 @@ void kvm_spurious_fault(void); failed; \ }) +/* + * The first...last VMX feature MSRs that are emulated by KVM. This may or may + * not cover all known VMX MSRs, as KVM doesn't emulate an MSR until there's an + * associated feature that KVM supports for nested virtualization. + */ +#define KVM_FIRST_EMULATED_VMX_MSR MSR_IA32_VMX_BASIC +#define KVM_LAST_EMULATED_VMX_MSR MSR_IA32_VMX_VMFUNC + #define KVM_DEFAULT_PLE_GAP 128 #define KVM_VMX_DEFAULT_PLE_WINDOW 4096 #define KVM_DEFAULT_PLE_WINDOW_GROW 2 -- cgit From 9eb6ba31db27253a11441368d2801c1eedc48b4f Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 10 Mar 2023 16:46:01 -0800 Subject: KVM: x86: Generate set of VMX feature MSRs using first/last definitions Add VMX MSRs to the runtime list of feature MSRs by iterating over the range of emulated MSRs instead of manually defining each MSR in the "all" list. Using the range definition reduces the cost of emulating a new VMX MSR, e.g. prevents forgetting to add an MSR to the list. Extracting the VMX MSRs from the "all" list, which is a compile-time constant, also shrinks the list to the point where the compiler can heavily optimize code that iterates over the list. No functional change intended. Reviewed-by: Xiaoyao Li Link: https://lore.kernel.org/r/20230311004618.920745-5-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/x86.c | 53 ++++++++++++++++++++++------------------------------- 1 file changed, 22 insertions(+), 31 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 087497aebded..6b667c651951 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1543,36 +1543,19 @@ static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)]; static unsigned num_emulated_msrs; /* - * List of msr numbers which are used to expose MSR-based features that - * can be used by a hypervisor to validate requested CPU features. + * List of MSRs that control the existence of MSR-based features, i.e. MSRs + * that are effectively CPUID leafs. VMX MSRs are also included in the set of + * feature MSRs, but are handled separately to allow expedited lookups. 
*/ -static const u32 msr_based_features_all[] = { - MSR_IA32_VMX_BASIC, - MSR_IA32_VMX_TRUE_PINBASED_CTLS, - MSR_IA32_VMX_PINBASED_CTLS, - MSR_IA32_VMX_TRUE_PROCBASED_CTLS, - MSR_IA32_VMX_PROCBASED_CTLS, - MSR_IA32_VMX_TRUE_EXIT_CTLS, - MSR_IA32_VMX_EXIT_CTLS, - MSR_IA32_VMX_TRUE_ENTRY_CTLS, - MSR_IA32_VMX_ENTRY_CTLS, - MSR_IA32_VMX_MISC, - MSR_IA32_VMX_CR0_FIXED0, - MSR_IA32_VMX_CR0_FIXED1, - MSR_IA32_VMX_CR4_FIXED0, - MSR_IA32_VMX_CR4_FIXED1, - MSR_IA32_VMX_VMCS_ENUM, - MSR_IA32_VMX_PROCBASED_CTLS2, - MSR_IA32_VMX_EPT_VPID_CAP, - MSR_IA32_VMX_VMFUNC, - +static const u32 msr_based_features_all_except_vmx[] = { MSR_AMD64_DE_CFG, MSR_IA32_UCODE_REV, MSR_IA32_ARCH_CAPABILITIES, MSR_IA32_PERF_CAPABILITIES, }; -static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all)]; +static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all_except_vmx) + + (KVM_LAST_EMULATED_VMX_MSR - KVM_FIRST_EMULATED_VMX_MSR + 1)]; static unsigned int num_msr_based_features; /* @@ -7009,6 +6992,18 @@ out: return r; } +static void kvm_probe_feature_msr(u32 msr_index) +{ + struct kvm_msr_entry msr = { + .index = msr_index, + }; + + if (kvm_get_msr_feature(&msr)) + return; + + msr_based_features[num_msr_based_features++] = msr_index; +} + static void kvm_probe_msr_to_save(u32 msr_index) { u32 dummy[2]; @@ -7110,15 +7105,11 @@ static void kvm_init_msr_lists(void) emulated_msrs[num_emulated_msrs++] = emulated_msrs_all[i]; } - for (i = 0; i < ARRAY_SIZE(msr_based_features_all); i++) { - struct kvm_msr_entry msr; + for (i = KVM_FIRST_EMULATED_VMX_MSR; i <= KVM_LAST_EMULATED_VMX_MSR; i++) + kvm_probe_feature_msr(i); - msr.index = msr_based_features_all[i]; - if (kvm_get_msr_feature(&msr)) - continue; - - msr_based_features[num_msr_based_features++] = msr_based_features_all[i]; - } + for (i = 0; i < ARRAY_SIZE(msr_based_features_all_except_vmx); i++) + kvm_probe_feature_msr(msr_based_features_all_except_vmx[i]); } static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, -- cgit From e4d86fb910dfdeb4320d5a7b9ebf6e81f10b1380 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 10 Mar 2023 16:46:02 -0800 Subject: KVM: selftests: Split PMU caps sub-tests to avoid writing MSR after KVM_RUN Split the PERF_CAPABILITIES subtests into two parts so that the LBR format testcases don't execute after KVM_RUN. Similar to the guest CPUID model, KVM will soon disallow changing PERF_CAPABILITIES after KVM_RUN, at which point attempting to set the MSR after KVM_RUN will yield false positives and/or false negatives depending on what the test is trying to do. Land the LBR format test in a more generic "immutable features" test in anticipation of expanding its scope to other immutable features. 
Link: https://lore.kernel.org/r/20230311004618.920745-6-seanjc@google.com Signed-off-by: Sean Christopherson --- .../selftests/kvm/x86_64/vmx_pmu_caps_test.c | 51 +++++++++++++--------- 1 file changed, 31 insertions(+), 20 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c index c280ba1e6572..ac08c0fdd84d 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c @@ -41,24 +41,10 @@ static void guest_code(void) wrmsr(MSR_IA32_PERF_CAPABILITIES, PMU_CAP_LBR_FMT); } -int main(int argc, char *argv[]) +static void test_fungible_perf_capabilities(union perf_capabilities host_cap) { - struct kvm_vm *vm; struct kvm_vcpu *vcpu; - int ret; - union perf_capabilities host_cap; - uint64_t val; - - host_cap.capabilities = kvm_get_feature_msr(MSR_IA32_PERF_CAPABILITIES); - host_cap.capabilities &= (PMU_CAP_FW_WRITES | PMU_CAP_LBR_FMT); - - /* Create VM */ - vm = vm_create_with_one_vcpu(&vcpu, guest_code); - - TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_PDCM)); - - TEST_REQUIRE(kvm_cpu_has_p(X86_PROPERTY_PMU_VERSION)); - TEST_REQUIRE(kvm_cpu_property(X86_PROPERTY_PMU_VERSION) > 0); + struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, guest_code); /* testcase 1, set capabilities when we have PDCM bit */ vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, PMU_CAP_FW_WRITES); @@ -70,7 +56,16 @@ int main(int argc, char *argv[]) vcpu_run(vcpu); ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), PMU_CAP_FW_WRITES); - /* testcase 2, check valid LBR formats are accepted */ + kvm_vm_free(vm); +} + +static void test_immutable_perf_capabilities(union perf_capabilities host_cap) +{ + struct kvm_vcpu *vcpu; + struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, NULL); + uint64_t val; + int ret; + vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, 0); ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), 0); @@ -78,8 +73,8 @@ int main(int argc, char *argv[]) ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), (u64)host_cap.lbr_format); /* - * Testcase 3, check that an "invalid" LBR format is rejected. Only an - * exact match of the host's format (and 0/disabled) is allowed. + * KVM only supports the host's native LBR format, as well as '0' (to + * disable LBR support). Verify KVM rejects all other LBR formats. 
*/ for (val = 1; val <= PMU_CAP_LBR_FMT; val++) { if (val == (host_cap.capabilities & PMU_CAP_LBR_FMT)) @@ -88,7 +83,23 @@ int main(int argc, char *argv[]) ret = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, val); TEST_ASSERT(!ret, "Bad LBR FMT = 0x%lx didn't fail", val); } + kvm_vm_free(vm); +} + +int main(int argc, char *argv[]) +{ + union perf_capabilities host_cap; + + TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_PDCM)); + + TEST_REQUIRE(kvm_cpu_has_p(X86_PROPERTY_PMU_VERSION)); + TEST_REQUIRE(kvm_cpu_property(X86_PROPERTY_PMU_VERSION) > 0); + + host_cap.capabilities = kvm_get_feature_msr(MSR_IA32_PERF_CAPABILITIES); + host_cap.capabilities &= (PMU_CAP_FW_WRITES | PMU_CAP_LBR_FMT); + + test_fungible_perf_capabilities(host_cap); + test_immutable_perf_capabilities(host_cap); printf("Completed perf capability tests.\n"); - kvm_vm_free(vm); } -- cgit From 0094f62c7eaaaf53a011a4e46f9f32e5f3295e8c Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 10 Mar 2023 16:46:03 -0800 Subject: KVM: x86: Disallow writes to immutable feature MSRs after KVM_RUN Disallow writes to feature MSRs after KVM_RUN to prevent userspace from changing the vCPU model after running the vCPU. Similar to guest CPUID, KVM uses feature MSRs to configure intercepts, determine what operations are/aren't allowed, etc. Changing the capabilities while the vCPU is active will at best yield unpredictable guest behavior, and at worst could be dangerous to KVM. Allow writing the current value, e.g. so that userspace can blindly set all MSRs when emulating RESET, and unconditionally allow writes to MSR_IA32_UCODE_REV so that userspace can emulate patch loads. Special case the VMX MSRs to keep the generic list small, i.e. so that KVM can do a linear walk of the generic list without incurring meaningful overhead. Cc: Like Xu Cc: Yu Zhang Reviewed-by: Xiaoyao Li Link: https://lore.kernel.org/r/20230311004618.920745-7-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/x86.c | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 6b667c651951..cf33b7d910a6 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1558,6 +1558,25 @@ static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all_except_vmx) + (KVM_LAST_EMULATED_VMX_MSR - KVM_FIRST_EMULATED_VMX_MSR + 1)]; static unsigned int num_msr_based_features; +/* + * All feature MSRs except uCode revID, which tracks the currently loaded uCode + * patch, are immutable once the vCPU model is defined. + */ +static bool kvm_is_immutable_feature_msr(u32 msr) +{ + int i; + + if (msr >= KVM_FIRST_EMULATED_VMX_MSR && msr <= KVM_LAST_EMULATED_VMX_MSR) + return true; + + for (i = 0; i < ARRAY_SIZE(msr_based_features_all_except_vmx); i++) { + if (msr == msr_based_features_all_except_vmx[i]) + return msr != MSR_IA32_UCODE_REV; + } + + return false; +} + /* * Some IA32_ARCH_CAPABILITIES bits have dependencies on MSRs that KVM * does not yet virtualize. These include: @@ -2175,6 +2194,22 @@ static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) { + u64 val; + + /* + * Disallow writes to immutable feature MSRs after KVM_RUN. KVM does + * not support modifying the guest vCPU model on the fly, e.g. changing + * the nVMX capabilities while L2 is running is nonsensical. Ignore + * writes of the same value, e.g. to allow userspace to blindly stuff + * all MSRs when emulating RESET. 
+ */ + if (kvm_vcpu_has_run(vcpu) && kvm_is_immutable_feature_msr(index)) { + if (do_get_msr(vcpu, index, &val) || *data != val) + return -EINVAL; + + return 0; + } + return kvm_set_msr_ignored_check(vcpu, index, *data, true); } -- cgit From 3a6de51a437fb4d2433f8a99fb59f43866cdbb98 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 10 Mar 2023 16:46:04 -0800 Subject: KVM: x86/pmu: WARN and bug the VM if PMU is refreshed after vCPU has run Now that KVM disallows changing feature MSRs, i.e. PERF_CAPABILITIES, after running a vCPU, WARN and bug the VM if the PMU is refreshed after the vCPU has run. Note, KVM has disallowed CPUID updates after running a vCPU since commit feb627e8d6f6 ("KVM: x86: Forbid KVM_SET_CPUID{,2} after KVM_RUN"), i.e. PERF_CAPABILITIES was the only remaining way to trigger a PMU refresh after KVM_RUN. Cc: Like Xu Link: https://lore.kernel.org/r/20230311004618.920745-8-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/pmu.c | 3 +++ arch/x86/kvm/x86.c | 10 +++++++++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 29492c2a0c82..136184fe9e92 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -589,6 +589,9 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) */ void kvm_pmu_refresh(struct kvm_vcpu *vcpu) { + if (KVM_BUG_ON(kvm_vcpu_has_run(vcpu), vcpu->kvm)) + return; + bitmap_zero(vcpu_to_pmu(vcpu)->all_valid_pmc_idx, X86_PMC_IDX_MAX); static_call(kvm_x86_pmu_refresh)(vcpu); } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index cf33b7d910a6..a86ad45a53b8 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3632,9 +3632,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (data & ~kvm_caps.supported_perf_cap) return 1; + /* + * Note, this is not just a performance optimization! KVM + * disallows changing feature MSRs after the vCPU has run; PMU + * refresh will bug the VM if called after the vCPU has run. + */ + if (vcpu->arch.perf_capabilities == data) + break; + vcpu->arch.perf_capabilities = data; kvm_pmu_refresh(vcpu); - return 0; + break; case MSR_EFER: return set_efer(vcpu, msr_info); case MSR_K7_HWCR: -- cgit From 957d0f70e97bb2257d8bf746ff2f524b793751b3 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 10 Mar 2023 16:46:05 -0800 Subject: KVM: x86/pmu: Zero out LBR capabilities during PMU refresh Zero out the LBR capabilities during PMU refresh to avoid exposing LBRs to the guest against userspace's wishes. If userspace modifies the guest's CPUID model or invokes KVM_CAP_PMU_CAPABILITY to disable vPMU after an initial KVM_SET_CPUID2, but before the first KVM_RUN, KVM will retain the previous LBR info due to bailing before refreshing the LBR descriptor. Note, this is a very theoretical bug, there is no known use case where a VMM would deliberately enable the vPMU via KVM_SET_CPUID2, and then later disable the vPMU. 
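(Illustration, not part of the patch.) A sketch of the theoretical sequence, written with the selftest helpers used elsewhere in this log: the vPMU is dropped via CPUID before the first KVM_RUN, and without this change the LBR info computed by the earlier refresh would linger because intel_pmu_refresh() returns before reaching the LBR setup.

	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, NULL);	/* default CPUID enables the vPMU */

	/* Disable the vPMU before the first KVM_RUN; pre-fix, the stale LBR state survives the refresh. */
	vcpu_clear_cpuid_entry(vcpu, 0xa);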
Link: https://lore.kernel.org/r/20230311004618.920745-9-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/vmx/pmu_intel.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index e8a3be0b9df9..d889bb2a1de5 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -531,6 +531,16 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) pmu->pebs_enable_mask = ~0ull; pmu->pebs_data_cfg_mask = ~0ull; + memset(&lbr_desc->records, 0, sizeof(lbr_desc->records)); + + /* + * Setting passthrough of LBR MSRs is done only in the VM-Entry loop, + * and PMU refresh is disallowed after the vCPU has run, i.e. this code + * should never be reached while KVM is passing through MSRs. + */ + if (KVM_BUG_ON(lbr_desc->msr_passthrough, vcpu->kvm)) + return; + entry = kvm_find_cpuid_entry(vcpu, 0xa); if (!entry || !vcpu->kvm->arch.enable_pmu) return; -- cgit From 710fb612672e0c05fe3f56fb4e81fae22ef492a2 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 10 Mar 2023 16:46:06 -0800 Subject: KVM: selftests: Move 0/initial value PERF_CAPS checks to dedicated sub-test Use a separate sub-test to verify userspace can clear PERF_CAPABILITIES and restore it to the KVM-supported value, as the testcase isn't unique to the LBR format. Link: https://lore.kernel.org/r/20230311004618.920745-10-seanjc@google.com Signed-off-by: Sean Christopherson --- .../selftests/kvm/x86_64/vmx_pmu_caps_test.c | 25 ++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c index ac08c0fdd84d..c3b0738e361b 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c @@ -41,6 +41,24 @@ static void guest_code(void) wrmsr(MSR_IA32_PERF_CAPABILITIES, PMU_CAP_LBR_FMT); } +/* + * Verify KVM allows writing PERF_CAPABILITIES with all KVM-supported features + * enabled, as well as '0' (to disable all features). + */ +static void test_basic_perf_capabilities(union perf_capabilities host_cap) +{ + struct kvm_vcpu *vcpu; + struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, NULL); + + vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, 0); + ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), 0); + + vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities); + ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), host_cap.capabilities); + + kvm_vm_free(vm); +} + static void test_fungible_perf_capabilities(union perf_capabilities host_cap) { struct kvm_vcpu *vcpu; @@ -66,12 +84,6 @@ static void test_immutable_perf_capabilities(union perf_capabilities host_cap) uint64_t val; int ret; - vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, 0); - ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), 0); - - vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.lbr_format); - ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), (u64)host_cap.lbr_format); - /* * KVM only supports the host's native LBR format, as well as '0' (to * disable LBR support). Verify KVM rejects all other LBR formats. 
@@ -98,6 +110,7 @@ int main(int argc, char *argv[]) host_cap.capabilities = kvm_get_feature_msr(MSR_IA32_PERF_CAPABILITIES); host_cap.capabilities &= (PMU_CAP_FW_WRITES | PMU_CAP_LBR_FMT); + test_basic_perf_capabilities(host_cap); test_fungible_perf_capabilities(host_cap); test_immutable_perf_capabilities(host_cap); -- cgit From b1b705627cb3c23333e04637d7a90833b560584e Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 10 Mar 2023 16:46:07 -0800 Subject: KVM: selftests: Assert that full-width PMC writes are supported if PDCM=1 KVM emulates full-width PMC writes in software, assert that KVM reports full-width writes as supported if PERF_CAPABILITIES is supported. Link: https://lore.kernel.org/r/20230311004618.920745-11-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c index c3b0738e361b..035470b38400 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c @@ -110,6 +110,9 @@ int main(int argc, char *argv[]) host_cap.capabilities = kvm_get_feature_msr(MSR_IA32_PERF_CAPABILITIES); host_cap.capabilities &= (PMU_CAP_FW_WRITES | PMU_CAP_LBR_FMT); + TEST_ASSERT(host_cap.full_width_write, + "Full-width writes should always be supported"); + test_basic_perf_capabilities(host_cap); test_fungible_perf_capabilities(host_cap); test_immutable_perf_capabilities(host_cap); -- cgit From 22234c2495eafd6b8f1f26c6c6adc12aca60fd7f Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 10 Mar 2023 16:46:08 -0800 Subject: KVM: selftests: Print out failing MSR and value in vcpu_set_msr() Reimplement vcpu_set_msr() as a macro and pretty print the failing MSR (when possible) and the value if KVM_SET_MSRS fails instead of using the using the standard KVM_IOCTL_ERROR(). KVM_SET_MSRS is somewhat odd in that it returns the index of the last successful write, i.e. will be '0' on failure barring an entirely different KVM bug. And for writing MSRs, the MSR being written and the value being written are almost always relevant to the failure, i.e. just saying "failed!" doesn't help debug. Place the string goo in a separate macro in anticipation of using it to further expand MSR testing. Link: https://lore.kernel.org/r/20230311004618.920745-12-seanjc@google.com Signed-off-by: Sean Christopherson --- .../selftests/kvm/include/x86_64/processor.h | 32 ++++++++++++++++------ 1 file changed, 24 insertions(+), 8 deletions(-) diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h index 90387ddcb2a9..293a9085311d 100644 --- a/tools/testing/selftests/kvm/include/x86_64/processor.h +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h @@ -928,14 +928,30 @@ static inline void vcpu_clear_cpuid_feature(struct kvm_vcpu *vcpu, uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index); int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value); -static inline void vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, - uint64_t msr_value) -{ - int r = _vcpu_set_msr(vcpu, msr_index, msr_value); - - TEST_ASSERT(r == 1, KVM_IOCTL_ERROR(KVM_SET_MSRS, r)); -} - +/* + * Assert on an MSR access(es) and pretty print the MSR name when possible. 
+ * Note, the caller provides the stringified name so that the name of macro is + * printed, not the value the macro resolves to (due to macro expansion). + */ +#define TEST_ASSERT_MSR(cond, fmt, msr, str, args...) \ +do { \ + if (__builtin_constant_p(msr)) { \ + TEST_ASSERT(cond, fmt, str, args); \ + } else if (!(cond)) { \ + char buf[16]; \ + \ + snprintf(buf, sizeof(buf), "MSR 0x%x", msr); \ + TEST_ASSERT(cond, fmt, buf, args); \ + } \ +} while (0) + +#define vcpu_set_msr(vcpu, msr, val) \ +do { \ + uint64_t v = val; \ + \ + TEST_ASSERT_MSR(_vcpu_set_msr(vcpu, msr, v) == 1, \ + "KVM_SET_MSRS failed on %s, value = 0x%lx", msr, #msr, v); \ +} while (0) void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits); bool vm_is_unrestricted_guest(struct kvm_vm *vm); -- cgit From f138258565d18f0f9a8d07f04162c68eee997ce6 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 10 Mar 2023 16:46:09 -0800 Subject: KVM: selftests: Verify KVM preserves userspace writes to "durable" MSRs Assert that KVM provides "read what you wrote" semantics for all "durable" MSRs (for lack of a better name). The extra coverage is cheap from a runtime performance perspective, and verifying the behavior in the common helper avoids gratuitous copy+paste in individual tests. Note, this affects all tests that set MSRs from userspace! Link: https://lore.kernel.org/r/20230311004618.920745-13-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/include/x86_64/processor.h | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h index 293a9085311d..e1d65d933310 100644 --- a/tools/testing/selftests/kvm/include/x86_64/processor.h +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h @@ -945,12 +945,27 @@ do { \ } \ } while (0) +/* + * Returns true if KVM should return the last written value when reading an MSR + * from userspace, e.g. the MSR isn't a command MSR, doesn't emulate state that + * is changing, etc. This is NOT an exhaustive list! The intent is to filter + * out MSRs that are not durable _and_ that a selftest wants to write. + */ +static inline bool is_durable_msr(uint32_t msr) +{ + return msr != MSR_IA32_TSC; +} + #define vcpu_set_msr(vcpu, msr, val) \ do { \ - uint64_t v = val; \ + uint64_t r, v = val; \ \ TEST_ASSERT_MSR(_vcpu_set_msr(vcpu, msr, v) == 1, \ "KVM_SET_MSRS failed on %s, value = 0x%lx", msr, #msr, v); \ + if (!is_durable_msr(msr)) \ + break; \ + r = vcpu_get_msr(vcpu, msr); \ + TEST_ASSERT_MSR(r == v, "Set %s to '0x%lx', got back '0x%lx'", msr, #msr, v, r);\ } while (0) void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits); -- cgit From 69713940d2b4de34a6923fc202e1796dc2f79604 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 10 Mar 2023 16:46:10 -0800 Subject: KVM: selftests: Drop now-redundant checks on PERF_CAPABILITIES writes Now that vcpu_set_msr() verifies the expected "read what was wrote" semantics of all durable MSRs, including PERF_CAPABILITIES, drop the now-redundant manual checks in the VMX PMU caps test. 
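(Illustration, not part of the patch.) The read-back check leans on the return-value convention of the underlying ioctl: KVM_SET_MSRS reports how many entries were processed, so a rejected single-MSR write returns 0 rather than a negative error. A bare-ioctl sketch of what _vcpu_set_msr() boils down to, assuming a vcpu_fd obtained from KVM_CREATE_VCPU and the definitions in <linux/kvm.h>:

	struct {
		struct kvm_msrs hdr;		/* { nmsrs, pad } followed by the entries[] array */
		struct kvm_msr_entry entry;
	} buf = {
		.hdr.nmsrs   = 1,
		.entry.index = MSR_IA32_PERF_CAPABILITIES,
		.entry.data  = 0,
	};

	int r = ioctl(vcpu_fd, KVM_SET_MSRS, &buf);	/* r == 1: write accepted, r == 0: rejected */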
Link: https://lore.kernel.org/r/20230311004618.920745-14-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c index 035470b38400..f7a27b5c949b 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c @@ -51,10 +51,7 @@ static void test_basic_perf_capabilities(union perf_capabilities host_cap) struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, NULL); vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, 0); - ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), 0); - vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities); - ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), host_cap.capabilities); kvm_vm_free(vm); } @@ -67,9 +64,6 @@ static void test_fungible_perf_capabilities(union perf_capabilities host_cap) /* testcase 1, set capabilities when we have PDCM bit */ vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, PMU_CAP_FW_WRITES); - /* check capabilities can be retrieved with KVM_GET_MSR */ - ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), PMU_CAP_FW_WRITES); - /* check whatever we write with KVM_SET_MSR is _not_ modified */ vcpu_run(vcpu); ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), PMU_CAP_FW_WRITES); -- cgit From 37f4e79c43e5750e75b3de8726b4c21bae6ab7aa Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 10 Mar 2023 16:46:11 -0800 Subject: KVM: selftests: Test all fungible features in PERF_CAPABILITIES Verify that userspace can set all fungible features in PERF_CAPABILITIES. Drop the now unused #define of the "full-width writes" flag. Link: https://lore.kernel.org/r/20230311004618.920745-15-seanjc@google.com Signed-off-by: Sean Christopherson --- .../selftests/kvm/x86_64/vmx_pmu_caps_test.c | 29 ++++++++++++++++++---- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c index f7a27b5c949b..2647282ff380 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c @@ -14,10 +14,11 @@ #define _GNU_SOURCE /* for program_invocation_short_name */ #include +#include + #include "kvm_util.h" #include "vmx.h" -#define PMU_CAP_FW_WRITES (1ULL << 13) #define PMU_CAP_LBR_FMT 0x3f union perf_capabilities { @@ -36,6 +37,18 @@ union perf_capabilities { u64 capabilities; }; +/* + * The LBR format and most PEBS features are immutable, all other features are + * fungible (if supported by the host and KVM). 
+ */ +static const union perf_capabilities immutable_caps = { + .lbr_format = -1, + .pebs_trap = 1, + .pebs_arch_reg = 1, + .pebs_format = -1, + .pebs_baseline = 1, +}; + static void guest_code(void) { wrmsr(MSR_IA32_PERF_CAPABILITIES, PMU_CAP_LBR_FMT); @@ -58,15 +71,22 @@ static void test_basic_perf_capabilities(union perf_capabilities host_cap) static void test_fungible_perf_capabilities(union perf_capabilities host_cap) { + const uint64_t fungible_caps = host_cap.capabilities & ~immutable_caps.capabilities; + struct kvm_vcpu *vcpu; struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, guest_code); + int bit; - /* testcase 1, set capabilities when we have PDCM bit */ - vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, PMU_CAP_FW_WRITES); + for_each_set_bit(bit, &fungible_caps, 64) { + vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, BIT_ULL(bit)); + vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, + host_cap.capabilities & ~BIT_ULL(bit)); + } + vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities); /* check whatever we write with KVM_SET_MSR is _not_ modified */ vcpu_run(vcpu); - ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), PMU_CAP_FW_WRITES); + ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), host_cap.capabilities); kvm_vm_free(vm); } @@ -102,7 +122,6 @@ int main(int argc, char *argv[]) TEST_REQUIRE(kvm_cpu_property(X86_PROPERTY_PMU_VERSION) > 0); host_cap.capabilities = kvm_get_feature_msr(MSR_IA32_PERF_CAPABILITIES); - host_cap.capabilities &= (PMU_CAP_FW_WRITES | PMU_CAP_LBR_FMT); TEST_ASSERT(host_cap.full_width_write, "Full-width writes should always be supported"); -- cgit From a2a34d148e75c242fd059e8c7ab31e88396013b6 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 10 Mar 2023 16:46:12 -0800 Subject: KVM: selftests: Test all immutable non-format bits in PERF_CAPABILITIES Add negative testing of all immutable bits in PERF_CAPABILITIES, i.e. single bits that are reserved-0 or are effectively reserved-1 by KVM. Omit LBR and PEBS format bits from the test as it's easier to test them manually than it is to add safeguards to the comment path, e.g. toggling a single bit can yield a format of '0', which is legal as a "disable" value. Link: https://lore.kernel.org/r/20230311004618.920745-16-seanjc@google.com Signed-off-by: Sean Christopherson --- .../selftests/kvm/x86_64/vmx_pmu_caps_test.c | 30 +++++++++++++++++++--- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c index 2647282ff380..d91bf44a2e39 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c @@ -49,6 +49,11 @@ static const union perf_capabilities immutable_caps = { .pebs_baseline = 1, }; +static const union perf_capabilities format_caps = { + .lbr_format = -1, + .pebs_format = -1, +}; + static void guest_code(void) { wrmsr(MSR_IA32_PERF_CAPABILITIES, PMU_CAP_LBR_FMT); @@ -91,12 +96,30 @@ static void test_fungible_perf_capabilities(union perf_capabilities host_cap) kvm_vm_free(vm); } +/* + * Verify KVM rejects attempts to set unsupported and/or immutable features in + * PERF_CAPABILITIES. Note, LBR format and PEBS format need to be validated + * separately as they are multi-bit values, e.g. toggling or setting a single + * bit can generate a false positive without dedicated safeguards. 
+ */ static void test_immutable_perf_capabilities(union perf_capabilities host_cap) { + const uint64_t reserved_caps = (~host_cap.capabilities | + immutable_caps.capabilities) & + ~format_caps.capabilities; + struct kvm_vcpu *vcpu; struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, NULL); uint64_t val; - int ret; + int r, bit; + + for_each_set_bit(bit, &reserved_caps, 64) { + r = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, + host_cap.capabilities ^ BIT_ULL(bit)); + TEST_ASSERT(!r, "%s immutable feature 0x%llx (bit %d) didn't fail", + host_cap.capabilities & BIT_ULL(bit) ? "Setting" : "Clearing", + BIT_ULL(bit), bit); + } /* * KVM only supports the host's native LBR format, as well as '0' (to @@ -106,9 +129,10 @@ static void test_immutable_perf_capabilities(union perf_capabilities host_cap) if (val == (host_cap.capabilities & PMU_CAP_LBR_FMT)) continue; - ret = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, val); - TEST_ASSERT(!ret, "Bad LBR FMT = 0x%lx didn't fail", val); + r = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, val); + TEST_ASSERT(!r, "Bad LBR FMT = 0x%lx didn't fail", val); } + kvm_vm_free(vm); } -- cgit From baa36dac6ca8ecd709dafb87a79d3bead40e48fe Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 10 Mar 2023 16:46:13 -0800 Subject: KVM: selftests: Expand negative testing of guest writes to PERF_CAPABILITIES Test that the guest can't write 0 to PERF_CAPABILITIES, can't write the current value, and can't toggle _any_ bits. There is no reason to special case the LBR format. Link: https://lore.kernel.org/r/20230311004618.920745-17-seanjc@google.com Signed-off-by: Sean Christopherson --- .../selftests/kvm/x86_64/vmx_pmu_caps_test.c | 61 +++++++++++++++++++--- 1 file changed, 54 insertions(+), 7 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c index d91bf44a2e39..44fc6101a547 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c @@ -54,9 +54,59 @@ static const union perf_capabilities format_caps = { .pebs_format = -1, }; -static void guest_code(void) +static void guest_code(uint64_t current_val) { - wrmsr(MSR_IA32_PERF_CAPABILITIES, PMU_CAP_LBR_FMT); + uint8_t vector; + int i; + + vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES, current_val); + GUEST_ASSERT_2(vector == GP_VECTOR, current_val, vector); + + vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES, 0); + GUEST_ASSERT_2(vector == GP_VECTOR, 0, vector); + + for (i = 0; i < 64; i++) { + vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES, + current_val ^ BIT_ULL(i)); + GUEST_ASSERT_2(vector == GP_VECTOR, + current_val ^ BIT_ULL(i), vector); + } + + GUEST_DONE(); +} + +/* + * Verify that guest WRMSRs to PERF_CAPABILITIES #GP regardless of the value + * written, that the guest always sees the userspace controlled value, and that + * PERF_CAPABILITIES is immutable after KVM_RUN. 
+ */ +static void test_guest_wrmsr_perf_capabilities(union perf_capabilities host_cap) +{ + struct kvm_vcpu *vcpu; + struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, guest_code); + struct ucall uc; + + vm_init_descriptor_tables(vm); + vcpu_init_descriptor_tables(vcpu); + + vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities); + + vcpu_args_set(vcpu, 1, host_cap.capabilities); + vcpu_run(vcpu); + + switch (get_ucall(vcpu, &uc)) { + case UCALL_ABORT: + REPORT_GUEST_ASSERT_2(uc, "val = 0x%lx, vector = %lu"); + break; + case UCALL_DONE: + break; + default: + TEST_FAIL("Unexpected ucall: %lu", uc.cmd); + } + + ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), host_cap.capabilities); + + kvm_vm_free(vm); } /* @@ -79,7 +129,7 @@ static void test_fungible_perf_capabilities(union perf_capabilities host_cap) const uint64_t fungible_caps = host_cap.capabilities & ~immutable_caps.capabilities; struct kvm_vcpu *vcpu; - struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, guest_code); + struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, NULL); int bit; for_each_set_bit(bit, &fungible_caps, 64) { @@ -89,10 +139,6 @@ static void test_fungible_perf_capabilities(union perf_capabilities host_cap) } vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities); - /* check whatever we write with KVM_SET_MSR is _not_ modified */ - vcpu_run(vcpu); - ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), host_cap.capabilities); - kvm_vm_free(vm); } @@ -153,6 +199,7 @@ int main(int argc, char *argv[]) test_basic_perf_capabilities(host_cap); test_fungible_perf_capabilities(host_cap); test_immutable_perf_capabilities(host_cap); + test_guest_wrmsr_perf_capabilities(host_cap); printf("Completed perf capability tests.\n"); } -- cgit From 81fd92411264e4f7b982bb6066b4bc04311bc48a Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 10 Mar 2023 16:46:14 -0800 Subject: KVM: selftests: Test post-KVM_RUN writes to PERF_CAPABILITIES Now that KVM disallows changing PERF_CAPABILITIES after KVM_RUN, expand the host side checks to verify KVM rejects any attempts to change bits from userspace. 
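(Illustration, not part of the patch.) The one post-KVM_RUN write that must keep succeeding is a rewrite of the current value; as the earlier "Disallow writes to immutable feature MSRs" change notes, that is what lets a VMM emulate RESET by blindly restoring every saved MSR. A sketch of that flow, with a hypothetical saved_msrs array captured at vCPU creation:

	/* Hypothetical RESET emulation: immutable feature MSRs are re-written with
	 * their unchanged values, which KVM accepts even after KVM_RUN. */
	for (i = 0; i < num_saved_msrs; i++)
		vcpu_set_msr(vcpu, saved_msrs[i].index, saved_msrs[i].data);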
Link: https://lore.kernel.org/r/20230311004618.920745-18-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c index 44fc6101a547..6fc86f5eba0b 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c @@ -85,6 +85,7 @@ static void test_guest_wrmsr_perf_capabilities(union perf_capabilities host_cap) struct kvm_vcpu *vcpu; struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, guest_code); struct ucall uc; + int r, i; vm_init_descriptor_tables(vm); vcpu_init_descriptor_tables(vcpu); @@ -106,6 +107,18 @@ static void test_guest_wrmsr_perf_capabilities(union perf_capabilities host_cap) ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), host_cap.capabilities); + vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities); + + r = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, 0); + TEST_ASSERT(!r, "Post-KVM_RUN write '0' didn't fail"); + + for (i = 0; i < 64; i++) { + r = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, + host_cap.capabilities ^ BIT_ULL(i)); + TEST_ASSERT(!r, "Post-KVM_RUN write '0x%llx'didn't fail", + host_cap.capabilities ^ BIT_ULL(i)); + } + kvm_vm_free(vm); } -- cgit From bc7bb0082960d030171e11ed4e4b518950415625 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 10 Mar 2023 16:46:15 -0800 Subject: KVM: selftests: Drop "all done!" printf() from PERF_CAPABILITIES test Drop the arbitrary "done" message from the VMX PMU caps test, it's pretty obvious the test is done when the process exits. Link: https://lore.kernel.org/r/20230311004618.920745-19-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c index 6fc86f5eba0b..6733d879a00b 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c @@ -213,6 +213,4 @@ int main(int argc, char *argv[]) test_fungible_perf_capabilities(host_cap); test_immutable_perf_capabilities(host_cap); test_guest_wrmsr_perf_capabilities(host_cap); - - printf("Completed perf capability tests.\n"); } -- cgit From 8ac2f774b9ead749720fe7a648e6c6ada57e01f3 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 10 Mar 2023 16:46:16 -0800 Subject: KVM: selftests: Refactor LBR_FMT test to avoid use of separate macro Rework the LBR format test to use the bitfield instead of a separate mask macro, mainly so that adding a nearly-identical PEBS format test doesn't have to copy-paste-tweak the macro too. No functional change intended. 
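(Reading note on the hunk below, not part of the patch.) Because lbr_format is a 6-bit bitfield, incrementing it past 63 wraps back to 0, so a loop of the following form is self-terminating after every non-zero format has been tried once:

	for (val.lbr_format = 1; val.lbr_format; val.lbr_format++)
		/* try val.capabilities; the field wraps to 0 and the loop exits after format 63 */;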
Link: https://lore.kernel.org/r/20230311004618.920745-20-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c index 6733d879a00b..38aec88d733b 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c @@ -19,8 +19,6 @@ #include "kvm_util.h" #include "vmx.h" -#define PMU_CAP_LBR_FMT 0x3f - union perf_capabilities { struct { u64 lbr_format:6; @@ -169,7 +167,7 @@ static void test_immutable_perf_capabilities(union perf_capabilities host_cap) struct kvm_vcpu *vcpu; struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, NULL); - uint64_t val; + union perf_capabilities val = host_cap; int r, bit; for_each_set_bit(bit, &reserved_caps, 64) { @@ -184,12 +182,13 @@ static void test_immutable_perf_capabilities(union perf_capabilities host_cap) * KVM only supports the host's native LBR format, as well as '0' (to * disable LBR support). Verify KVM rejects all other LBR formats. */ - for (val = 1; val <= PMU_CAP_LBR_FMT; val++) { - if (val == (host_cap.capabilities & PMU_CAP_LBR_FMT)) + for (val.lbr_format = 1; val.lbr_format; val.lbr_format++) { + if (val.lbr_format == host_cap.lbr_format) continue; - r = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, val); - TEST_ASSERT(!r, "Bad LBR FMT = 0x%lx didn't fail", val); + r = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, val.capabilities); + TEST_ASSERT(!r, "Bad LBR FMT = 0x%x didn't fail, host = 0x%x", + val.lbr_format, host_cap.lbr_format); } kvm_vm_free(vm); -- cgit From 8b95b4155523dcb5412a73c631c680098bcbbb40 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 10 Mar 2023 16:46:17 -0800 Subject: KVM: selftests: Add negative testcase for PEBS format in PERF_CAPABILITIES Expand the immutable features sub-test for PERF_CAPABILITIES to verify KVM rejects any attempt to use a PEBS format other than the host's. Link: https://lore.kernel.org/r/20230311004618.920745-21-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c index 38aec88d733b..29aaa0419294 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c @@ -191,6 +191,16 @@ static void test_immutable_perf_capabilities(union perf_capabilities host_cap) val.lbr_format, host_cap.lbr_format); } + /* Ditto for the PEBS format. */ + for (val.pebs_format = 1; val.pebs_format; val.pebs_format++) { + if (val.pebs_format == host_cap.pebs_format) + continue; + + r = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, val.capabilities); + TEST_ASSERT(!r, "Bad PEBS FMT = 0x%x didn't fail, host = 0x%x", + val.pebs_format, host_cap.pebs_format); + } + kvm_vm_free(vm); } -- cgit From d8f992e9fde8dc23b6fe649fa1b0ed5c123738fe Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 10 Mar 2023 16:46:18 -0800 Subject: KVM: selftests: Verify LBRs are disabled if vPMU is disabled Verify that disabling the guest's vPMU via CPUID also disables LBRs. KVM has had at least one bug where LBRs would remain enabled even though the intent was to disable everything PMU related. 
Link: https://lore.kernel.org/r/20230311004618.920745-22-seanjc@google.com Signed-off-by: Sean Christopherson --- .../selftests/kvm/x86_64/vmx_pmu_caps_test.c | 29 ++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c index 29aaa0419294..3009b3e5254d 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c @@ -204,6 +204,34 @@ static void test_immutable_perf_capabilities(union perf_capabilities host_cap) kvm_vm_free(vm); } +/* + * Test that LBR MSRs are writable when LBRs are enabled, and then verify that + * disabling the vPMU via CPUID also disables LBR support. Set bits 2:0 of + * LBR_TOS as those bits are writable across all uarch implementations (arch + * LBRs will need to poke a different MSR). + */ +static void test_lbr_perf_capabilities(union perf_capabilities host_cap) +{ + struct kvm_vcpu *vcpu; + struct kvm_vm *vm; + int r; + + if (!host_cap.lbr_format) + return; + + vm = vm_create_with_one_vcpu(&vcpu, NULL); + + vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities); + vcpu_set_msr(vcpu, MSR_LBR_TOS, 7); + + vcpu_clear_cpuid_entry(vcpu, X86_PROPERTY_PMU_VERSION.function); + + r = _vcpu_set_msr(vcpu, MSR_LBR_TOS, 7); + TEST_ASSERT(!r, "Writing LBR_TOS should fail after disabling vPMU"); + + kvm_vm_free(vm); +} + int main(int argc, char *argv[]) { union perf_capabilities host_cap; @@ -222,4 +250,5 @@ int main(int argc, char *argv[]) test_fungible_perf_capabilities(host_cap); test_immutable_perf_capabilities(host_cap); test_guest_wrmsr_perf_capabilities(host_cap); + test_lbr_perf_capabilities(host_cap); } -- cgit From cdd2fbf6360e84c02ac4cea11733083c2ca8dace Mon Sep 17 00:00:00 2001 From: Like Xu Date: Tue, 14 Feb 2023 13:07:46 +0800 Subject: KVM: x86/pmu: Rename pmc_is_enabled() to pmc_is_globally_enabled() The name of function pmc_is_enabled() is a bit misleading. A PMC can be disabled either by PERF_CLOBAL_CTRL or by its corresponding EVTSEL. Append global semantics to its name. 
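(Illustration, not part of the patch.) Concretely, a counter only counts when both gates are open; a sketch using helpers already referenced in this log, pmc_is_globally_enabled() for the PERF_GLOBAL_CTRL bit and pmc_speculative_in_use() for the counter's own EVTSEL/fixed-ctrl enable:

	bool pmc_is_counting = pmc_is_globally_enabled(pmc) &&	/* bit set in PERF_GLOBAL_CTRL */
			       pmc_speculative_in_use(pmc);	/* enabled via its own EVTSEL / fixed ctrl */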
Suggested-by: Jim Mattson Signed-off-by: Like Xu Link: https://lore.kernel.org/r/20230214050757.9623-2-likexu@tencent.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/pmu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 136184fe9e92..35d80151b853 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -93,7 +93,7 @@ void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops) #undef __KVM_X86_PMU_OP } -static inline bool pmc_is_enabled(struct kvm_pmc *pmc) +static inline bool pmc_is_globally_enabled(struct kvm_pmc *pmc) { return static_call(kvm_x86_pmu_pmc_is_enabled)(pmc); } @@ -409,7 +409,7 @@ static void reprogram_counter(struct kvm_pmc *pmc) pmc_pause_counter(pmc); - if (!pmc_speculative_in_use(pmc) || !pmc_is_enabled(pmc)) + if (!pmc_speculative_in_use(pmc) || !pmc_is_globally_enabled(pmc)) goto reprogram_complete; if (!check_pmu_event_filter(pmc)) @@ -688,7 +688,7 @@ void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id) for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) { pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i); - if (!pmc || !pmc_is_enabled(pmc) || !pmc_speculative_in_use(pmc)) + if (!pmc || !pmc_is_globally_enabled(pmc) || !pmc_speculative_in_use(pmc)) continue; /* Ignore checks for edge detect, pin control, invert and CMASK bits */ -- cgit From 8bca8c5ce40b03862d782b9991da146909199327 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 26 Jan 2023 17:08:03 -0800 Subject: KVM: VMX: Refactor intel_pmu_{g,}set_msr() to align with other helpers Invert the flows in intel_pmu_{g,s}et_msr()'s case statements so that they follow the kernel's preferred style of: if () return return which is also the style used by every other {g,s}et_msr() helper (except AMD's PMU variant, which doesn't use a switch statement). Modify the "set" paths with costly side effects, i.e. that reprogram counters, to skip only the side effects, i.e. to perform reserved bits checks even if the value is unchanged. None of the reserved bits checks are expensive, so there's no strong justification for skipping them, and guarding only the side effect makes it slightly more obvious what is being skipped and why. No functional change intended (assuming no reserved bit bugs). 
Link: https://lkml.kernel.org/r/Y%2B6cfen%2FCpO3%2FdLO%40google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/vmx/pmu_intel.c | 109 ++++++++++++++++++++++--------------------- 1 file changed, 57 insertions(+), 52 deletions(-) diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index d889bb2a1de5..c45bd10f80a1 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -351,45 +351,47 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) switch (msr) { case MSR_CORE_PERF_FIXED_CTR_CTRL: msr_info->data = pmu->fixed_ctr_ctrl; - return 0; + break; case MSR_CORE_PERF_GLOBAL_STATUS: msr_info->data = pmu->global_status; - return 0; + break; case MSR_CORE_PERF_GLOBAL_CTRL: msr_info->data = pmu->global_ctrl; - return 0; + break; case MSR_CORE_PERF_GLOBAL_OVF_CTRL: msr_info->data = 0; - return 0; + break; case MSR_IA32_PEBS_ENABLE: msr_info->data = pmu->pebs_enable; - return 0; + break; case MSR_IA32_DS_AREA: msr_info->data = pmu->ds_area; - return 0; + break; case MSR_PEBS_DATA_CFG: msr_info->data = pmu->pebs_data_cfg; - return 0; + break; default: if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) || (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) { u64 val = pmc_read_counter(pmc); msr_info->data = val & pmu->counter_bitmask[KVM_PMC_GP]; - return 0; + break; } else if ((pmc = get_fixed_pmc(pmu, msr))) { u64 val = pmc_read_counter(pmc); msr_info->data = val & pmu->counter_bitmask[KVM_PMC_FIXED]; - return 0; + break; } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) { msr_info->data = pmc->eventsel; - return 0; - } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, true)) - return 0; + break; + } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, true)) { + break; + } + return 1; } - return 1; + return 0; } static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) @@ -402,44 +404,43 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) switch (msr) { case MSR_CORE_PERF_FIXED_CTR_CTRL: - if (pmu->fixed_ctr_ctrl == data) - return 0; - if (!(data & pmu->fixed_ctr_ctrl_mask)) { + if (data & pmu->fixed_ctr_ctrl_mask) + return 1; + + if (pmu->fixed_ctr_ctrl != data) reprogram_fixed_counters(pmu, data); - return 0; - } break; case MSR_CORE_PERF_GLOBAL_STATUS: - if (msr_info->host_initiated) { - pmu->global_status = data; - return 0; - } - break; /* RO MSR */ + if (!msr_info->host_initiated) + return 1; /* RO MSR */ + + pmu->global_status = data; + break; case MSR_CORE_PERF_GLOBAL_CTRL: - if (pmu->global_ctrl == data) - return 0; - if (kvm_valid_perf_global_ctrl(pmu, data)) { + if (!kvm_valid_perf_global_ctrl(pmu, data)) + return 1; + + if (pmu->global_ctrl != data) { diff = pmu->global_ctrl ^ data; pmu->global_ctrl = data; reprogram_counters(pmu, diff); - return 0; } break; case MSR_CORE_PERF_GLOBAL_OVF_CTRL: - if (!(data & pmu->global_ovf_ctrl_mask)) { - if (!msr_info->host_initiated) - pmu->global_status &= ~data; - return 0; - } + if (data & pmu->global_ovf_ctrl_mask) + return 1; + + if (!msr_info->host_initiated) + pmu->global_status &= ~data; break; case MSR_IA32_PEBS_ENABLE: - if (pmu->pebs_enable == data) - return 0; - if (!(data & pmu->pebs_enable_mask)) { + if (data & pmu->pebs_enable_mask) + return 1; + + if (pmu->pebs_enable != data) { diff = pmu->pebs_enable ^ data; pmu->pebs_enable = data; reprogram_counters(pmu, diff); - return 0; } break; case MSR_IA32_DS_AREA: @@ -447,15 +448,14 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct 
msr_data *msr_info) return 1; if (is_noncanonical_address(data, vcpu)) return 1; + pmu->ds_area = data; - return 0; + break; case MSR_PEBS_DATA_CFG: - if (pmu->pebs_data_cfg == data) - return 0; - if (!(data & pmu->pebs_data_cfg_mask)) { - pmu->pebs_data_cfg = data; - return 0; - } + if (data & pmu->pebs_data_cfg_mask) + return 1; + + pmu->pebs_data_cfg = data; break; default: if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) || @@ -463,33 +463,38 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if ((msr & MSR_PMC_FULL_WIDTH_BIT) && (data & ~pmu->counter_bitmask[KVM_PMC_GP])) return 1; + if (!msr_info->host_initiated && !(msr & MSR_PMC_FULL_WIDTH_BIT)) data = (s64)(s32)data; pmc->counter += data - pmc_read_counter(pmc); pmc_update_sample_period(pmc); - return 0; + break; } else if ((pmc = get_fixed_pmc(pmu, msr))) { pmc->counter += data - pmc_read_counter(pmc); pmc_update_sample_period(pmc); - return 0; + break; } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) { - if (data == pmc->eventsel) - return 0; reserved_bits = pmu->reserved_bits; if ((pmc->idx == 2) && (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED)) reserved_bits ^= HSW_IN_TX_CHECKPOINTED; - if (!(data & reserved_bits)) { + if (data & reserved_bits) + return 1; + + if (data != pmc->eventsel) { pmc->eventsel = data; kvm_pmu_request_counter_reprogam(pmc); - return 0; } - } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false)) - return 0; + break; + } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false)) { + break; + } + /* Not a known PMU MSR. */ + return 1; } - return 1; + return 0; } static void setup_fixed_pmc_eventsel(struct kvm_pmu *pmu) -- cgit From 649bccd7fac98225525c79cf4b1cecc4bafdfc54 Mon Sep 17 00:00:00 2001 From: Like Xu Date: Tue, 14 Feb 2023 13:07:48 +0800 Subject: KVM: x86/pmu: Rewrite reprogram_counters() to improve performance A valid pmc is always tested before using pmu->reprogram_pmi. Eliminate this part of the redundancy by setting the counter's bitmask directly, and in addition, trigger KVM_REQ_PMU only once to save more cpu cycles. Signed-off-by: Like Xu Link: https://lore.kernel.org/r/20230214050757.9623-4-likexu@tencent.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/vmx/pmu_intel.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index c45bd10f80a1..eb291dfbe4aa 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -76,13 +76,13 @@ static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx) static void reprogram_counters(struct kvm_pmu *pmu, u64 diff) { int bit; - struct kvm_pmc *pmc; - for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX) { - pmc = intel_pmc_idx_to_pmc(pmu, bit); - if (pmc) - kvm_pmu_request_counter_reprogam(pmc); - } + if (!diff) + return; + + for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX) + set_bit(bit, pmu->reprogram_pmi); + kvm_make_request(KVM_REQ_PMU, pmu_to_vcpu(pmu)); } static bool intel_hw_event_available(struct kvm_pmc *pmc) -- cgit From 4fa5843d81fdce9ba7e03f8a666e2f8e592aec2b Mon Sep 17 00:00:00 2001 From: Like Xu Date: Fri, 10 Mar 2023 19:33:49 +0800 Subject: KVM: x86/pmu: Fix a typo in kvm_pmu_request_counter_reprogam() Fix a "reprogam" => "reprogram" typo in kvm_pmu_request_counter_reprogam(). 
Fixes: 68fb4757e867 ("KVM: x86/pmu: Defer reprogram_counter() to kvm_pmu_handle_event()") Signed-off-by: Like Xu Link: https://lore.kernel.org/r/20230310113349.31799-1-likexu@tencent.com [sean: trim the changelog] Signed-off-by: Sean Christopherson --- arch/x86/kvm/pmu.c | 2 +- arch/x86/kvm/pmu.h | 2 +- arch/x86/kvm/svm/pmu.c | 2 +- arch/x86/kvm/vmx/pmu_intel.c | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 35d80151b853..2226db9601e2 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -650,7 +650,7 @@ static void kvm_pmu_incr_counter(struct kvm_pmc *pmc) { pmc->prev_counter = pmc->counter; pmc->counter = (pmc->counter + 1) & pmc_bitmask(pmc); - kvm_pmu_request_counter_reprogam(pmc); + kvm_pmu_request_counter_reprogram(pmc); } static inline bool eventsel_match_perf_hw_id(struct kvm_pmc *pmc, diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h index be62c16f2265..5c7bbf03b599 100644 --- a/arch/x86/kvm/pmu.h +++ b/arch/x86/kvm/pmu.h @@ -195,7 +195,7 @@ static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops) KVM_PMC_MAX_FIXED); } -static inline void kvm_pmu_request_counter_reprogam(struct kvm_pmc *pmc) +static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc) { set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi); kvm_make_request(KVM_REQ_PMU, pmc->vcpu); diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c index cc77a0681800..5fa939e411d8 100644 --- a/arch/x86/kvm/svm/pmu.c +++ b/arch/x86/kvm/svm/pmu.c @@ -161,7 +161,7 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) data &= ~pmu->reserved_bits; if (data != pmc->eventsel) { pmc->eventsel = data; - kvm_pmu_request_counter_reprogam(pmc); + kvm_pmu_request_counter_reprogram(pmc); } return 0; } diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index eb291dfbe4aa..741efe2c497b 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -57,7 +57,7 @@ static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data) pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i); __set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use); - kvm_pmu_request_counter_reprogam(pmc); + kvm_pmu_request_counter_reprogram(pmc); } } @@ -484,7 +484,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (data != pmc->eventsel) { pmc->eventsel = data; - kvm_pmu_request_counter_reprogam(pmc); + kvm_pmu_request_counter_reprogram(pmc); } break; } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false)) { -- cgit From dfdeda67ea2dac57d2d7506d65cfe5a0878ad285 Mon Sep 17 00:00:00 2001 From: Aaron Lewis Date: Tue, 7 Mar 2023 14:13:56 +0000 Subject: KVM: x86/pmu: Prevent the PMU from counting disallowed events When counting "Instructions Retired" (0xc0) in a guest, KVM will occasionally increment the PMU counter regardless of if that event is being filtered. This is because some PMU events are incremented via kvm_pmu_trigger_event(), which doesn't know about the event filter. Add the event filter to kvm_pmu_trigger_event(), so events that are disallowed do not increment their counters. 
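For context, the filter that marks an event as disallowed is installed from userspace via the KVM_SET_PMU_EVENT_FILTER VM ioctl; a minimal sketch, with error handling trimmed and assuming the raw encoding 0xc0 for "Instructions Retired" with a zero unit mask:

#include <linux/kvm.h>
#include <stdlib.h>
#include <sys/ioctl.h>

/* Deny "Instructions Retired" (event select 0xc0, unit mask 0) for a VM. */
static int deny_inst_retired(int vm_fd)
{
	struct kvm_pmu_event_filter *f;
	int ret;

	f = calloc(1, sizeof(*f) + sizeof(f->events[0]));
	if (!f)
		return -1;

	f->action = KVM_PMU_EVENT_DENY;
	f->nevents = 1;
	f->events[0] = 0xc0;

	ret = ioctl(vm_fd, KVM_SET_PMU_EVENT_FILTER, f);
	free(f);
	return ret;
}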
Fixes: 9cd803d496e7 ("KVM: x86: Update vPMCs when retiring instructions") Signed-off-by: Aaron Lewis Reviewed-by: Like Xu Link: https://lore.kernel.org/r/20230307141400.1486314-2-aaronlewis@google.com [sean: prepend "pmc" to the new function] Signed-off-by: Sean Christopherson --- arch/x86/kvm/pmu.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 2226db9601e2..597a8f8f90b9 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -400,6 +400,12 @@ static bool check_pmu_event_filter(struct kvm_pmc *pmc) return is_fixed_event_allowed(filter, pmc->idx); } +static bool pmc_event_is_allowed(struct kvm_pmc *pmc) +{ + return pmc_is_globally_enabled(pmc) && pmc_speculative_in_use(pmc) && + check_pmu_event_filter(pmc); +} + static void reprogram_counter(struct kvm_pmc *pmc) { struct kvm_pmu *pmu = pmc_to_pmu(pmc); @@ -409,10 +415,7 @@ static void reprogram_counter(struct kvm_pmc *pmc) pmc_pause_counter(pmc); - if (!pmc_speculative_in_use(pmc) || !pmc_is_globally_enabled(pmc)) - goto reprogram_complete; - - if (!check_pmu_event_filter(pmc)) + if (!pmc_event_is_allowed(pmc)) goto reprogram_complete; if (pmc->counter < pmc->prev_counter) @@ -688,7 +691,7 @@ void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id) for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) { pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i); - if (!pmc || !pmc_is_globally_enabled(pmc) || !pmc_speculative_in_use(pmc)) + if (!pmc || !pmc_event_is_allowed(pmc)) continue; /* Ignore checks for edge detect, pin control, invert and CMASK bits */ -- cgit From 33ef1411a36b47ae7ecdb919463b0d78576b3832 Mon Sep 17 00:00:00 2001 From: Aaron Lewis Date: Fri, 7 Apr 2023 16:32:49 -0700 Subject: KVM: selftests: Add a common helper for the PMU event filter guest code Split out the common parts of the Intel and AMD guest code in the PMU event filter test into a helper function. This is in preparation for adding additional counters to the test. No functional changes intended. Signed-off-by: Aaron Lewis Link: https://lore.kernel.org/r/20230407233254.957013-2-seanjc@google.com Signed-off-by: Sean Christopherson --- .../selftests/kvm/x86_64/pmu_event_filter_test.c | 29 ++++++++++++++-------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c index 2feef25ba691..13eca9357252 100644 --- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c +++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c @@ -100,6 +100,15 @@ static void check_msr(uint32_t msr, uint64_t bits_to_flip) GUEST_SYNC(0); } +static uint64_t run_and_measure_loop(uint32_t msr_base) +{ + uint64_t branches_retired = rdmsr(msr_base + 0); + + __asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES})); + + return rdmsr(msr_base + 0) - branches_retired; +} + static void intel_guest_code(void) { check_msr(MSR_CORE_PERF_GLOBAL_CTRL, 1); @@ -108,16 +117,15 @@ static void intel_guest_code(void) GUEST_SYNC(1); for (;;) { - uint64_t br0, br1; + uint64_t count; wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0); wrmsr(MSR_P6_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE | ARCH_PERFMON_EVENTSEL_OS | INTEL_BR_RETIRED); - wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 1); - br0 = rdmsr(MSR_IA32_PMC0); - __asm__ __volatile__("loop ." 
: "+c"((int){NUM_BRANCHES})); - br1 = rdmsr(MSR_IA32_PMC0); - GUEST_SYNC(br1 - br0); + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x1); + + count = run_and_measure_loop(MSR_IA32_PMC0); + GUEST_SYNC(count); } } @@ -133,15 +141,14 @@ static void amd_guest_code(void) GUEST_SYNC(1); for (;;) { - uint64_t br0, br1; + uint64_t count; wrmsr(MSR_K7_EVNTSEL0, 0); wrmsr(MSR_K7_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE | ARCH_PERFMON_EVENTSEL_OS | AMD_ZEN_BR_RETIRED); - br0 = rdmsr(MSR_K7_PERFCTR0); - __asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES})); - br1 = rdmsr(MSR_K7_PERFCTR0); - GUEST_SYNC(br1 - br0); + + count = run_and_measure_loop(MSR_K7_PERFCTR0); + GUEST_SYNC(count); } } -- cgit From fa32233d51b9d26369b8371e986c1030c4201fae Mon Sep 17 00:00:00 2001 From: Aaron Lewis Date: Fri, 7 Apr 2023 16:32:50 -0700 Subject: KVM: selftests: Add helpers for PMC asserts in PMU event filter test Add helper macros to consolidate the asserts that a PMC is/isn't counting (branch) instructions retired. This will make it easier to add additional asserts related to counting instructions later on. No functional changes intended. Signed-off-by: Aaron Lewis [sean: add "INSTRUCTIONS", massage changelog] Link: https://lore.kernel.org/r/20230407233254.957013-3-seanjc@google.com Signed-off-by: Sean Christopherson --- .../selftests/kvm/x86_64/pmu_event_filter_test.c | 52 +++++++++++----------- 1 file changed, 27 insertions(+), 25 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c index 13eca9357252..20a1f4fc2f48 100644 --- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c +++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c @@ -244,14 +244,27 @@ static struct kvm_pmu_event_filter *remove_event(struct kvm_pmu_event_filter *f, return f; } +#define ASSERT_PMC_COUNTING_INSTRUCTIONS(count) \ +do { \ + if (count != NUM_BRANCHES) \ + pr_info("%s: Branch instructions retired = %lu (expected %u)\n", \ + __func__, count, NUM_BRANCHES); \ + TEST_ASSERT(count, "Allowed PMU event is not counting."); \ +} while (0) + +#define ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS(count) \ +do { \ + if (count) \ + pr_info("%s: Branch instructions retired = %lu (expected 0)\n", \ + __func__, count); \ + TEST_ASSERT(!count, "Disallowed PMU Event is counting"); \ +} while (0) + static void test_without_filter(struct kvm_vcpu *vcpu) { uint64_t count = run_vcpu_to_sync(vcpu); - if (count != NUM_BRANCHES) - pr_info("%s: Branch instructions retired = %lu (expected %u)\n", - __func__, count, NUM_BRANCHES); - TEST_ASSERT(count, "Allowed PMU event is not counting"); + ASSERT_PMC_COUNTING_INSTRUCTIONS(count); } static uint64_t test_with_filter(struct kvm_vcpu *vcpu, @@ -269,12 +282,9 @@ static void test_amd_deny_list(struct kvm_vcpu *vcpu) f = create_pmu_event_filter(&event, 1, KVM_PMU_EVENT_DENY, 0); count = test_with_filter(vcpu, f); - free(f); - if (count != NUM_BRANCHES) - pr_info("%s: Branch instructions retired = %lu (expected %u)\n", - __func__, count, NUM_BRANCHES); - TEST_ASSERT(count, "Allowed PMU event is not counting"); + + ASSERT_PMC_COUNTING_INSTRUCTIONS(count); } static void test_member_deny_list(struct kvm_vcpu *vcpu) @@ -283,10 +293,8 @@ static void test_member_deny_list(struct kvm_vcpu *vcpu) uint64_t count = test_with_filter(vcpu, f); free(f); - if (count) - pr_info("%s: Branch instructions retired = %lu (expected 0)\n", - __func__, count); - TEST_ASSERT(!count, "Disallowed PMU Event is counting"); + + 
ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS(count); } static void test_member_allow_list(struct kvm_vcpu *vcpu) @@ -295,10 +303,8 @@ static void test_member_allow_list(struct kvm_vcpu *vcpu) uint64_t count = test_with_filter(vcpu, f); free(f); - if (count != NUM_BRANCHES) - pr_info("%s: Branch instructions retired = %lu (expected %u)\n", - __func__, count, NUM_BRANCHES); - TEST_ASSERT(count, "Allowed PMU event is not counting"); + + ASSERT_PMC_COUNTING_INSTRUCTIONS(count); } static void test_not_member_deny_list(struct kvm_vcpu *vcpu) @@ -310,10 +316,8 @@ static void test_not_member_deny_list(struct kvm_vcpu *vcpu) remove_event(f, AMD_ZEN_BR_RETIRED); count = test_with_filter(vcpu, f); free(f); - if (count != NUM_BRANCHES) - pr_info("%s: Branch instructions retired = %lu (expected %u)\n", - __func__, count, NUM_BRANCHES); - TEST_ASSERT(count, "Allowed PMU event is not counting"); + + ASSERT_PMC_COUNTING_INSTRUCTIONS(count); } static void test_not_member_allow_list(struct kvm_vcpu *vcpu) @@ -325,10 +329,8 @@ static void test_not_member_allow_list(struct kvm_vcpu *vcpu) remove_event(f, AMD_ZEN_BR_RETIRED); count = test_with_filter(vcpu, f); free(f); - if (count) - pr_info("%s: Branch instructions retired = %lu (expected 0)\n", - __func__, count); - TEST_ASSERT(!count, "Disallowed PMU Event is counting"); + + ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS(count); } /* -- cgit From c140e93a0c11ac5a56834ac11ddb0f777307b2c1 Mon Sep 17 00:00:00 2001 From: Aaron Lewis Date: Fri, 7 Apr 2023 16:32:51 -0700 Subject: KVM: selftests: Print detailed info in PMU event filter asserts Provide the actual vs. expected count in the PMU event filter test's asserts instead of relying on pr_info() to provide the context, e.g. so that all information needed to triage a failure is readily available even if the environment in which the test is run captures only the assert itself. 
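The same idea works outside the selftest harness; a stand-in for TEST_ASSERT() that carries the observed count in the failure message itself might look like the following (hypothetical macro, not part of the series):

#include <stdio.h>
#include <stdlib.h>

/* Fail loudly and embed the observed value, so the assert alone is enough to triage. */
#define ASSERT_RETIRED(count, want_nonzero)					\
do {										\
	unsigned long __c = (count);						\
	if (!!__c != !!(want_nonzero)) {					\
		fprintf(stderr, "%s: branch instructions retired = %lu (expected %s)\n", \
			__func__, __c, (want_nonzero) ? "> 0" : "0");		\
		abort();							\
	}									\
} while (0)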
Signed-off-by: Aaron Lewis [sean: rewrite changelog] Link: https://lore.kernel.org/r/20230407233254.957013-4-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c index 20a1f4fc2f48..79feec24ae73 100644 --- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c +++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c @@ -246,18 +246,17 @@ static struct kvm_pmu_event_filter *remove_event(struct kvm_pmu_event_filter *f, #define ASSERT_PMC_COUNTING_INSTRUCTIONS(count) \ do { \ - if (count != NUM_BRANCHES) \ + if (count && count != NUM_BRANCHES) \ pr_info("%s: Branch instructions retired = %lu (expected %u)\n", \ __func__, count, NUM_BRANCHES); \ - TEST_ASSERT(count, "Allowed PMU event is not counting."); \ + TEST_ASSERT(count, "%s: Branch instructions retired = %lu (expected > 0)", \ + __func__, count); \ } while (0) #define ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS(count) \ do { \ - if (count) \ - pr_info("%s: Branch instructions retired = %lu (expected 0)\n", \ - __func__, count); \ - TEST_ASSERT(!count, "Disallowed PMU Event is counting"); \ + TEST_ASSERT(!count, "%s: Branch instructions retired = %lu (expected 0)", \ + __func__, count); \ } while (0) static void test_without_filter(struct kvm_vcpu *vcpu) -- cgit From c02c74428288282c9bd5fd37fe4135f3ca419a86 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 7 Apr 2023 16:32:52 -0700 Subject: KVM: selftests: Use error codes to signal errors in PMU event filter test Use '0' to signal success and '-errno' to signal failure in the PMU event filter test so that the values are slightly less magical/arbitrary. Using '0' in the error paths is especially confusing as understanding it's an error value requires following the breadcrumbs to the host code that ultimately consumes the value. Arguably there should also be a #define for "success", but 0/-errno is a common enough pattern that defining another macro on top would likely do more harm than good. 
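A sketch of what the guest-side convention looks like after the change, assuming the selftest's rdmsr()/wrmsr() guest helpers and a hypothetical check_bits() wrapper (not the test's actual function names):

#include <errno.h>
#include <stdint.h>

/* Returns 0 on success, -errno on failure, matching the new GUEST_SYNC values. */
static int check_bits(uint32_t msr, uint64_t bits)
{
	uint64_t v = rdmsr(msr) ^ bits;

	wrmsr(msr, v);
	if (rdmsr(msr) != v)
		return -EIO;	/* the flipped bits did not stick */

	return 0;
}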
Link: https://lore.kernel.org/r/20230407233254.957013-5-seanjc@google.com Reviewed by: Aaron Lewis Signed-off-by: Sean Christopherson --- .../testing/selftests/kvm/x86_64/pmu_event_filter_test.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c index 79feec24ae73..0329c439b684 100644 --- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c +++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c @@ -77,7 +77,7 @@ static const uint64_t event_list[] = { */ static void guest_gp_handler(struct ex_regs *regs) { - GUEST_SYNC(0); + GUEST_SYNC(-EFAULT); } /* @@ -92,12 +92,12 @@ static void check_msr(uint32_t msr, uint64_t bits_to_flip) wrmsr(msr, v); if (rdmsr(msr) != v) - GUEST_SYNC(0); + GUEST_SYNC(-EIO); v ^= bits_to_flip; wrmsr(msr, v); if (rdmsr(msr) != v) - GUEST_SYNC(0); + GUEST_SYNC(-EIO); } static uint64_t run_and_measure_loop(uint32_t msr_base) @@ -114,7 +114,7 @@ static void intel_guest_code(void) check_msr(MSR_CORE_PERF_GLOBAL_CTRL, 1); check_msr(MSR_P6_EVNTSEL0, 0xffff); check_msr(MSR_IA32_PMC0, 0xffff); - GUEST_SYNC(1); + GUEST_SYNC(0); for (;;) { uint64_t count; @@ -138,7 +138,7 @@ static void amd_guest_code(void) { check_msr(MSR_K7_EVNTSEL0, 0xffff); check_msr(MSR_K7_PERFCTR0, 0xffff); - GUEST_SYNC(1); + GUEST_SYNC(0); for (;;) { uint64_t count; @@ -178,13 +178,13 @@ static uint64_t run_vcpu_to_sync(struct kvm_vcpu *vcpu) */ static bool sanity_check_pmu(struct kvm_vcpu *vcpu) { - bool success; + uint64_t r; vm_install_exception_handler(vcpu->vm, GP_VECTOR, guest_gp_handler); - success = run_vcpu_to_sync(vcpu); + r = run_vcpu_to_sync(vcpu); vm_install_exception_handler(vcpu->vm, GP_VECTOR, NULL); - return success; + return !r; } static struct kvm_pmu_event_filter *alloc_pmu_event_filter(uint32_t nevents) -- cgit From e9f322bd23960026275014477e21f7a9445fd926 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 7 Apr 2023 16:32:53 -0700 Subject: KVM: selftests: Copy full counter values from guest in PMU event filter test Use a single struct to track all PMC event counts in the PMU filter test, and copy the full struct to/from the guest when running and measuring each guest workload. Using a common struct avoids naming conflicts, e.g. the loads/stores testcase has claimed "perf_counter", and eliminates the unnecessary truncation of the counter values when they are propagated from the guest MSRs to the host structs. Zero the struct before running the guest workload to ensure that the test doesn't get a false pass due to consuming data from a previous run. Link: https://lore.kernel.org/r/20230407233254.957013-6-seanjc@google.com Reviewed by: Aaron Lewis Signed-off-by: Sean Christopherson --- .../selftests/kvm/x86_64/pmu_event_filter_test.c | 170 ++++++++++----------- 1 file changed, 80 insertions(+), 90 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c index 0329c439b684..58d2e17c7304 100644 --- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c +++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c @@ -71,6 +71,13 @@ static const uint64_t event_list[] = { AMD_ZEN_BR_RETIRED, }; +struct { + uint64_t loads; + uint64_t stores; + uint64_t loads_stores; + uint64_t branches_retired; +} pmc_results; + /* * If we encounter a #GP during the guest PMU sanity check, then the guest * PMU is not functional. 
Inform the hypervisor via GUEST_SYNC(0). @@ -100,13 +107,13 @@ static void check_msr(uint32_t msr, uint64_t bits_to_flip) GUEST_SYNC(-EIO); } -static uint64_t run_and_measure_loop(uint32_t msr_base) +static void run_and_measure_loop(uint32_t msr_base) { - uint64_t branches_retired = rdmsr(msr_base + 0); + const uint64_t branches_retired = rdmsr(msr_base + 0); __asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES})); - return rdmsr(msr_base + 0) - branches_retired; + pmc_results.branches_retired = rdmsr(msr_base + 0) - branches_retired; } static void intel_guest_code(void) @@ -117,15 +124,13 @@ static void intel_guest_code(void) GUEST_SYNC(0); for (;;) { - uint64_t count; - wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0); wrmsr(MSR_P6_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE | ARCH_PERFMON_EVENTSEL_OS | INTEL_BR_RETIRED); wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x1); - count = run_and_measure_loop(MSR_IA32_PMC0); - GUEST_SYNC(count); + run_and_measure_loop(MSR_IA32_PMC0); + GUEST_SYNC(0); } } @@ -141,14 +146,12 @@ static void amd_guest_code(void) GUEST_SYNC(0); for (;;) { - uint64_t count; - wrmsr(MSR_K7_EVNTSEL0, 0); wrmsr(MSR_K7_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE | ARCH_PERFMON_EVENTSEL_OS | AMD_ZEN_BR_RETIRED); - count = run_and_measure_loop(MSR_K7_PERFCTR0); - GUEST_SYNC(count); + run_and_measure_loop(MSR_K7_PERFCTR0); + GUEST_SYNC(0); } } @@ -168,6 +171,19 @@ static uint64_t run_vcpu_to_sync(struct kvm_vcpu *vcpu) return uc.args[1]; } +static void run_vcpu_and_sync_pmc_results(struct kvm_vcpu *vcpu) +{ + uint64_t r; + + memset(&pmc_results, 0, sizeof(pmc_results)); + sync_global_to_guest(vcpu->vm, pmc_results); + + r = run_vcpu_to_sync(vcpu); + TEST_ASSERT(!r, "Unexpected sync value: 0x%lx", r); + + sync_global_from_guest(vcpu->vm, pmc_results); +} + /* * In a nested environment or if the vPMU is disabled, the guest PMU * might not work as architected (accessing the PMU MSRs may raise @@ -244,92 +260,93 @@ static struct kvm_pmu_event_filter *remove_event(struct kvm_pmu_event_filter *f, return f; } -#define ASSERT_PMC_COUNTING_INSTRUCTIONS(count) \ +#define ASSERT_PMC_COUNTING_INSTRUCTIONS() \ do { \ - if (count && count != NUM_BRANCHES) \ + uint64_t br = pmc_results.branches_retired; \ + \ + if (br && br != NUM_BRANCHES) \ pr_info("%s: Branch instructions retired = %lu (expected %u)\n", \ - __func__, count, NUM_BRANCHES); \ - TEST_ASSERT(count, "%s: Branch instructions retired = %lu (expected > 0)", \ - __func__, count); \ + __func__, br, NUM_BRANCHES); \ + TEST_ASSERT(br, "%s: Branch instructions retired = %lu (expected > 0)", \ + __func__, br); \ } while (0) -#define ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS(count) \ +#define ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS() \ do { \ - TEST_ASSERT(!count, "%s: Branch instructions retired = %lu (expected 0)", \ - __func__, count); \ + uint64_t br = pmc_results.branches_retired; \ + \ + TEST_ASSERT(!br, "%s: Branch instructions retired = %lu (expected 0)", \ + __func__, br); \ } while (0) static void test_without_filter(struct kvm_vcpu *vcpu) { - uint64_t count = run_vcpu_to_sync(vcpu); + run_vcpu_and_sync_pmc_results(vcpu); - ASSERT_PMC_COUNTING_INSTRUCTIONS(count); + ASSERT_PMC_COUNTING_INSTRUCTIONS(); } -static uint64_t test_with_filter(struct kvm_vcpu *vcpu, - struct kvm_pmu_event_filter *f) +static void test_with_filter(struct kvm_vcpu *vcpu, + struct kvm_pmu_event_filter *f) { vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f); - return run_vcpu_to_sync(vcpu); + run_vcpu_and_sync_pmc_results(vcpu); } static void test_amd_deny_list(struct kvm_vcpu *vcpu) { 
uint64_t event = EVENT(0x1C2, 0); struct kvm_pmu_event_filter *f; - uint64_t count; f = create_pmu_event_filter(&event, 1, KVM_PMU_EVENT_DENY, 0); - count = test_with_filter(vcpu, f); + test_with_filter(vcpu, f); free(f); - ASSERT_PMC_COUNTING_INSTRUCTIONS(count); + ASSERT_PMC_COUNTING_INSTRUCTIONS(); } static void test_member_deny_list(struct kvm_vcpu *vcpu) { struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY); - uint64_t count = test_with_filter(vcpu, f); + test_with_filter(vcpu, f); free(f); - ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS(count); + ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS(); } static void test_member_allow_list(struct kvm_vcpu *vcpu) { struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW); - uint64_t count = test_with_filter(vcpu, f); + test_with_filter(vcpu, f); free(f); - ASSERT_PMC_COUNTING_INSTRUCTIONS(count); + ASSERT_PMC_COUNTING_INSTRUCTIONS(); } static void test_not_member_deny_list(struct kvm_vcpu *vcpu) { struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY); - uint64_t count; remove_event(f, INTEL_BR_RETIRED); remove_event(f, AMD_ZEN_BR_RETIRED); - count = test_with_filter(vcpu, f); + test_with_filter(vcpu, f); free(f); - ASSERT_PMC_COUNTING_INSTRUCTIONS(count); + ASSERT_PMC_COUNTING_INSTRUCTIONS(); } static void test_not_member_allow_list(struct kvm_vcpu *vcpu) { struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW); - uint64_t count; remove_event(f, INTEL_BR_RETIRED); remove_event(f, AMD_ZEN_BR_RETIRED); - count = test_with_filter(vcpu, f); + test_with_filter(vcpu, f); free(f); - ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS(count); + ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS(); } /* @@ -458,51 +475,30 @@ static bool supports_event_mem_inst_retired(void) #define EXCLUDE_MASKED_ENTRY(event_select, mask, match) \ KVM_PMU_ENCODE_MASKED_ENTRY(event_select, mask, match, true) -struct perf_counter { - union { - uint64_t raw; - struct { - uint64_t loads:22; - uint64_t stores:22; - uint64_t loads_stores:20; - }; - }; -}; - -static uint64_t masked_events_guest_test(uint32_t msr_base) +static void masked_events_guest_test(uint32_t msr_base) { - uint64_t ld0, ld1, st0, st1, ls0, ls1; - struct perf_counter c; - int val; - /* - * The acutal value of the counters don't determine the outcome of + * The actual value of the counters don't determine the outcome of * the test. Only that they are zero or non-zero. 
*/ - ld0 = rdmsr(msr_base + 0); - st0 = rdmsr(msr_base + 1); - ls0 = rdmsr(msr_base + 2); + const uint64_t loads = rdmsr(msr_base + 0); + const uint64_t stores = rdmsr(msr_base + 1); + const uint64_t loads_stores = rdmsr(msr_base + 2); + int val; + __asm__ __volatile__("movl $0, %[v];" "movl %[v], %%eax;" "incl %[v];" : [v]"+m"(val) :: "eax"); - ld1 = rdmsr(msr_base + 0); - st1 = rdmsr(msr_base + 1); - ls1 = rdmsr(msr_base + 2); - - c.loads = ld1 - ld0; - c.stores = st1 - st0; - c.loads_stores = ls1 - ls0; - - return c.raw; + pmc_results.loads = rdmsr(msr_base + 0) - loads; + pmc_results.stores = rdmsr(msr_base + 1) - stores; + pmc_results.loads_stores = rdmsr(msr_base + 2) - loads_stores; } static void intel_masked_events_guest_code(void) { - uint64_t r; - for (;;) { wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0); @@ -515,16 +511,13 @@ static void intel_masked_events_guest_code(void) wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x7); - r = masked_events_guest_test(MSR_IA32_PMC0); - - GUEST_SYNC(r); + masked_events_guest_test(MSR_IA32_PMC0); + GUEST_SYNC(0); } } static void amd_masked_events_guest_code(void) { - uint64_t r; - for (;;) { wrmsr(MSR_K7_EVNTSEL0, 0); wrmsr(MSR_K7_EVNTSEL1, 0); @@ -537,26 +530,22 @@ static void amd_masked_events_guest_code(void) wrmsr(MSR_K7_EVNTSEL2, ARCH_PERFMON_EVENTSEL_ENABLE | ARCH_PERFMON_EVENTSEL_OS | LS_DISPATCH_LOAD_STORE); - r = masked_events_guest_test(MSR_K7_PERFCTR0); - - GUEST_SYNC(r); + masked_events_guest_test(MSR_K7_PERFCTR0); + GUEST_SYNC(0); } } -static struct perf_counter run_masked_events_test(struct kvm_vcpu *vcpu, - const uint64_t masked_events[], - const int nmasked_events) +static void run_masked_events_test(struct kvm_vcpu *vcpu, + const uint64_t masked_events[], + const int nmasked_events) { struct kvm_pmu_event_filter *f; - struct perf_counter r; f = create_pmu_event_filter(masked_events, nmasked_events, KVM_PMU_EVENT_ALLOW, KVM_PMU_EVENT_FLAG_MASKED_EVENTS); - r.raw = test_with_filter(vcpu, f); + test_with_filter(vcpu, f); free(f); - - return r; } /* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */ @@ -681,7 +670,6 @@ static void run_masked_events_tests(struct kvm_vcpu *vcpu, uint64_t *events, int nevents) { int ntests = ARRAY_SIZE(test_cases); - struct perf_counter c; int i, n; for (i = 0; i < ntests; i++) { @@ -693,13 +681,15 @@ static void run_masked_events_tests(struct kvm_vcpu *vcpu, uint64_t *events, n = append_test_events(test, events, nevents); - c = run_masked_events_test(vcpu, events, n); - TEST_ASSERT(bool_eq(c.loads, test->flags & ALLOW_LOADS) && - bool_eq(c.stores, test->flags & ALLOW_STORES) && - bool_eq(c.loads_stores, + run_masked_events_test(vcpu, events, n); + + TEST_ASSERT(bool_eq(pmc_results.loads, test->flags & ALLOW_LOADS) && + bool_eq(pmc_results.stores, test->flags & ALLOW_STORES) && + bool_eq(pmc_results.loads_stores, test->flags & ALLOW_LOADS_STORES), - "%s loads: %u, stores: %u, loads + stores: %u", - test->msg, c.loads, c.stores, c.loads_stores); + "%s loads: %lu, stores: %lu, loads + stores: %lu", + test->msg, pmc_results.loads, pmc_results.stores, + pmc_results.loads_stores); } } -- cgit From 457bd7af1a17182e7f1f97eeb5d9107f8699e99d Mon Sep 17 00:00:00 2001 From: Aaron Lewis Date: Fri, 7 Apr 2023 16:32:54 -0700 Subject: KVM: selftests: Test the PMU event "Instructions retired" Add testing for the event "Instructions retired" (0xc0) in the PMU event filter on both Intel and AMD to ensure that the event doesn't count when it is disallowed. 
Unlike most of the other events, the event "Instructions retired" will be incremented by KVM when an instruction is emulated. Test that this case is being properly handled and that KVM doesn't increment the counter when that event is disallowed. Signed-off-by: Aaron Lewis Link: https://lore.kernel.org/r/20230307141400.1486314-6-aaronlewis@google.com Link: https://lore.kernel.org/r/20230407233254.957013-7-seanjc@google.com Signed-off-by: Sean Christopherson --- .../selftests/kvm/x86_64/pmu_event_filter_test.c | 34 ++++++++++++++++++++-- 1 file changed, 32 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c index 58d2e17c7304..8cec5c8aca8a 100644 --- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c +++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c @@ -54,6 +54,21 @@ #define AMD_ZEN_BR_RETIRED EVENT(0xc2, 0) + +/* + * "Retired instructions", from Processor Programming Reference + * (PPR) for AMD Family 17h Model 01h, Revision B1 Processors, + * Preliminary Processor Programming Reference (PPR) for AMD Family + * 17h Model 31h, Revision B0 Processors, and Preliminary Processor + * Programming Reference (PPR) for AMD Family 19h Model 01h, Revision + * B1 Processors Volume 1 of 2. + * --- and --- + * "Instructions retired", from the Intel SDM, volume 3, + * "Pre-defined Architectural Performance Events." + */ + +#define INST_RETIRED EVENT(0xc0, 0) + /* * This event list comprises Intel's eight architectural events plus * AMD's "retired branch instructions" for Zen[123] (and possibly @@ -61,7 +76,7 @@ */ static const uint64_t event_list[] = { EVENT(0x3c, 0), - EVENT(0xc0, 0), + INST_RETIRED, EVENT(0x3c, 1), EVENT(0x2e, 0x4f), EVENT(0x2e, 0x41), @@ -76,6 +91,7 @@ struct { uint64_t stores; uint64_t loads_stores; uint64_t branches_retired; + uint64_t instructions_retired; } pmc_results; /* @@ -110,10 +126,12 @@ static void check_msr(uint32_t msr, uint64_t bits_to_flip) static void run_and_measure_loop(uint32_t msr_base) { const uint64_t branches_retired = rdmsr(msr_base + 0); + const uint64_t insn_retired = rdmsr(msr_base + 1); __asm__ __volatile__("loop ." 
: "+c"((int){NUM_BRANCHES})); pmc_results.branches_retired = rdmsr(msr_base + 0) - branches_retired; + pmc_results.instructions_retired = rdmsr(msr_base + 1) - insn_retired; } static void intel_guest_code(void) @@ -127,7 +145,9 @@ static void intel_guest_code(void) wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0); wrmsr(MSR_P6_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE | ARCH_PERFMON_EVENTSEL_OS | INTEL_BR_RETIRED); - wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x1); + wrmsr(MSR_P6_EVNTSEL1, ARCH_PERFMON_EVENTSEL_ENABLE | + ARCH_PERFMON_EVENTSEL_OS | INST_RETIRED); + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x3); run_and_measure_loop(MSR_IA32_PMC0); GUEST_SYNC(0); @@ -149,6 +169,8 @@ static void amd_guest_code(void) wrmsr(MSR_K7_EVNTSEL0, 0); wrmsr(MSR_K7_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE | ARCH_PERFMON_EVENTSEL_OS | AMD_ZEN_BR_RETIRED); + wrmsr(MSR_K7_EVNTSEL1, ARCH_PERFMON_EVENTSEL_ENABLE | + ARCH_PERFMON_EVENTSEL_OS | INST_RETIRED); run_and_measure_loop(MSR_K7_PERFCTR0); GUEST_SYNC(0); @@ -263,20 +285,26 @@ static struct kvm_pmu_event_filter *remove_event(struct kvm_pmu_event_filter *f, #define ASSERT_PMC_COUNTING_INSTRUCTIONS() \ do { \ uint64_t br = pmc_results.branches_retired; \ + uint64_t ir = pmc_results.instructions_retired; \ \ if (br && br != NUM_BRANCHES) \ pr_info("%s: Branch instructions retired = %lu (expected %u)\n", \ __func__, br, NUM_BRANCHES); \ TEST_ASSERT(br, "%s: Branch instructions retired = %lu (expected > 0)", \ __func__, br); \ + TEST_ASSERT(ir, "%s: Instructions retired = %lu (expected > 0)", \ + __func__, ir); \ } while (0) #define ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS() \ do { \ uint64_t br = pmc_results.branches_retired; \ + uint64_t ir = pmc_results.instructions_retired; \ \ TEST_ASSERT(!br, "%s: Branch instructions retired = %lu (expected 0)", \ __func__, br); \ + TEST_ASSERT(!ir, "%s: Instructions retired = %lu (expected 0)", \ + __func__, ir); \ } while (0) static void test_without_filter(struct kvm_vcpu *vcpu) @@ -329,6 +357,7 @@ static void test_not_member_deny_list(struct kvm_vcpu *vcpu) { struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY); + remove_event(f, INST_RETIRED); remove_event(f, INTEL_BR_RETIRED); remove_event(f, AMD_ZEN_BR_RETIRED); test_with_filter(vcpu, f); @@ -341,6 +370,7 @@ static void test_not_member_allow_list(struct kvm_vcpu *vcpu) { struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW); + remove_event(f, INST_RETIRED); remove_event(f, INTEL_BR_RETIRED); remove_event(f, AMD_ZEN_BR_RETIRED); test_with_filter(vcpu, f); -- cgit