KVM: x86/pmu: Cap kvm_pmu_cap.num_counters_gp at KVM's internal max
author    Sean Christopherson <seanjc@google.com>
          Tue, 24 Jan 2023 23:49:00 +0000 (23:49 +0000)
committer Sean Christopherson <seanjc@google.com>
          Fri, 27 Jan 2023 02:03:42 +0000 (18:03 -0800)
Limit kvm_pmu_cap.num_counters_gp during kvm_init_pmu_capability() based
on the vendor PMU capabilities so that consuming num_counters_gp naturally
does the right thing.  This fixes a mostly theoretical bug where KVM could
over-report its PMU support in KVM_GET_SUPPORTED_CPUID for leaf 0xA, e.g.
if the number of counters reported by perf is greater than KVM's
hardcoded internal limit.  Incorporating the vendor limit also avoids
over-reporting MSRs to save when running on AMD.

Link: https://lore.kernel.org/r/20230124234905.3774678-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/pmu.h
arch/x86/kvm/svm/pmu.c
arch/x86/kvm/vmx/pmu_intel.c
arch/x86/kvm/x86.c
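
The change, in miniature: clamp the perf-reported general-purpose counter
count against a per-vendor ceiling once, at capability-init time, so every
later consumer can use kvm_pmu_cap.num_counters_gp directly.  Below is a
minimal standalone C sketch of that flow; the struct, the helper names, and
the limit value 8 are hypothetical stand-ins, not the kernel's actual
definitions.

/*
 * Userspace sketch of the capping logic; types and values are
 * illustrative stand-ins for the kernel's definitions.
 */
#include <stdio.h>

struct pmu_ops {
        int max_nr_gp_counters;         /* per-vendor ceiling, cf. MAX_NR_GP_COUNTERS */
};

static int num_counters_gp;             /* cf. kvm_pmu_cap.num_counters_gp */

/* cf. kvm_init_pmu_capability(): clamp what perf reported, up front. */
static void init_pmu_capability(const struct pmu_ops *ops, int perf_num_gp)
{
        num_counters_gp = perf_num_gp < ops->max_nr_gp_counters ?
                          perf_num_gp : ops->max_nr_gp_counters;
}

/* cf. kvm_init_msr_list(): consumers no longer need their own min(). */
static void report_perfctr_msrs(void)
{
        for (int i = 0; i < num_counters_gp; i++)
                printf("save PERFCTR%d\n", i);
}

int main(void)
{
        const struct pmu_ops intel = { .max_nr_gp_counters = 8 };

        init_pmu_capability(&intel, 10);    /* perf claims 10; clamped to 8 */
        report_perfctr_msrs();
        return 0;
}

With the clamp done up front, the open-coded min(KVM_INTEL_PMC_MAX_GENERIC,
kvm_pmu_cap.num_counters_gp) in kvm_init_msr_list() becomes redundant, which
is exactly what the x86.c hunks below delete.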

diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 080a3bb..79988da 100644
@@ -36,6 +36,7 @@ struct kvm_pmu_ops {
        void (*cleanup)(struct kvm_vcpu *vcpu);
 
        const u64 EVENTSEL_EVENT;
+       const int MAX_NR_GP_COUNTERS;
 };
 
 void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);
@@ -157,7 +158,7 @@ static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
 
 extern struct x86_pmu_capability kvm_pmu_cap;
 
-static inline void kvm_init_pmu_capability(void)
+static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
 {
        bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
 
@@ -176,6 +177,8 @@ static inline void kvm_init_pmu_capability(void)
        }
 
        kvm_pmu_cap.version = min(kvm_pmu_cap.version, 2);
+       kvm_pmu_cap.num_counters_gp = min(kvm_pmu_cap.num_counters_gp,
+                                         pmu_ops->MAX_NR_GP_COUNTERS);
        kvm_pmu_cap.num_counters_fixed = min(kvm_pmu_cap.num_counters_fixed,
                                             KVM_PMC_MAX_FIXED);
 }
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index 5da8c29..cc77a06 100644
@@ -232,4 +232,5 @@ struct kvm_pmu_ops amd_pmu_ops __initdata = {
        .init = amd_pmu_init,
        .reset = amd_pmu_reset,
        .EVENTSEL_EVENT = AMD64_EVENTSEL_EVENT,
+       .MAX_NR_GP_COUNTERS = KVM_AMD_PMC_MAX_GENERIC,
 };
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index ef24d25..e8a3be0 100644
@@ -815,4 +815,5 @@ struct kvm_pmu_ops intel_pmu_ops __initdata = {
        .deliver_pmi = intel_pmu_deliver_pmi,
        .cleanup = intel_pmu_cleanup,
        .EVENTSEL_EVENT = ARCH_PERFMON_EVENTSEL_EVENT,
+       .MAX_NR_GP_COUNTERS = KVM_INTEL_PMC_MAX_GENERIC,
 };
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index da02a08..ad95ce9 100644
@@ -7061,12 +7061,12 @@ static void kvm_init_msr_list(void)
                        break;
                case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR_MAX:
                        if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_PERFCTR0 >=
-                           min(KVM_INTEL_PMC_MAX_GENERIC, kvm_pmu_cap.num_counters_gp))
+                           kvm_pmu_cap.num_counters_gp)
                                continue;
                        break;
                case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL_MAX:
                        if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_EVENTSEL0 >=
-                           min(KVM_INTEL_PMC_MAX_GENERIC, kvm_pmu_cap.num_counters_gp))
+                           kvm_pmu_cap.num_counters_gp)
                                continue;
                        break;
                case MSR_IA32_XFD:
@@ -9386,7 +9386,7 @@ static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
        if (boot_cpu_has(X86_FEATURE_XSAVES))
                rdmsrl(MSR_IA32_XSS, host_xss);
 
-       kvm_init_pmu_capability();
+       kvm_init_pmu_capability(ops->pmu_ops);
 
        r = ops->hardware_setup();
        if (r != 0)