KVM: VMX: Directly query Intel PT mode when refreshing PMUs
author     Sean Christopherson <sean.j.christopherson@intel.com>
           Mon, 2 Mar 2020 23:57:00 +0000 (15:57 -0800)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Mon, 16 Mar 2020 16:58:38 +0000 (17:58 +0100)
Use vmx_pt_mode_is_host_guest() in intel_pmu_refresh() instead of
bouncing through kvm_x86_ops->pt_supported, and remove ->pt_supported()
as the PMU code was the last remaining user.

Opportunistically clean up the wording of a comment that referenced
kvm_x86_ops->pt_supported().

No functional change intended.

Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
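
For context, vmx_pt_mode_is_host_guest() itself does not appear in the diff below; it is a small inline helper in arch/x86/kvm/vmx/capabilities.h. The following is a minimal sketch of its shape, assuming the pt_mode variable and the PT_MODE_HOST_GUEST value from the VMX/Intel PT code of this era rather than quoting the tree directly:

/*
 * Sketch of the helper that intel_pmu_refresh() now calls directly in
 * place of the kvm_x86_ops->pt_supported() indirection.  The actual
 * definition lives in arch/x86/kvm/vmx/capabilities.h; pt_mode and
 * PT_MODE_HOST_GUEST are assumed from the surrounding VMX/Intel PT code.
 */
static inline bool vmx_pt_mode_is_host_guest(void)
{
        return pt_mode == PT_MODE_HOST_GUEST;
}

As the SVM hunk below shows, svm_pt_supported() unconditionally returned false, so the query was always VMX-specific and the per-vendor callback carried no information beyond the Intel PT mode.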
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm.c
arch/x86/kvm/vmx/pmu_intel.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 00a1be5..143d0ce 100644
@@ -1176,8 +1176,6 @@ struct kvm_x86_ops {
        void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu,
                enum exit_fastpath_completion *exit_fastpath);
 
-       bool (*pt_supported)(void);
-
        int (*check_nested_events)(struct kvm_vcpu *vcpu);
        void (*request_immediate_exit)(struct kvm_vcpu *vcpu);
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index f802d9c..e0be6d0 100644
@@ -6074,11 +6074,6 @@ static int svm_get_lpage_level(void)
        return PT_PDPE_LEVEL;
 }
 
-static bool svm_pt_supported(void)
-{
-       return false;
-}
-
 static bool svm_has_wbinvd_exit(void)
 {
        return true;
@@ -7440,8 +7435,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 
        .cpuid_update = svm_cpuid_update,
 
-       .pt_supported = svm_pt_supported,
-
        .set_supported_cpuid = svm_set_supported_cpuid,
 
        .has_wbinvd_exit = svm_has_wbinvd_exit,
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index e933541..7c85773 100644
@@ -335,7 +335,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
        pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
                        & ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
                            MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
-       if (kvm_x86_ops->pt_supported())
+       if (vmx_pt_mode_is_host_guest())
                pmu->global_ovf_ctrl_mask &=
                                ~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;
 
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index e5aeb6f..75f61fb 100644
@@ -6306,11 +6306,6 @@ static bool vmx_has_emulated_msr(int index)
        }
 }
 
-static bool vmx_pt_supported(void)
-{
-       return vmx_pt_mode_is_host_guest();
-}
-
 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
 {
        u32 exit_intr_info;
@@ -7945,7 +7940,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 
        .check_intercept = vmx_check_intercept,
        .handle_exit_irqoff = vmx_handle_exit_irqoff,
-       .pt_supported = vmx_pt_supported,
 
        .request_immediate_exit = vmx_request_immediate_exit,
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f3fac68..5be4961 100644
@@ -2820,10 +2820,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                    !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
                        return 1;
                /*
-                * We do support PT if kvm_x86_ops->pt_supported(), but we do
-                * not support IA32_XSS[bit 8]. Guests will have to use
-                * RDMSR/WRMSR rather than XSAVES/XRSTORS to save/restore PT
-                * MSRs.
+                * KVM supports exposing PT to the guest, but does not support
+                * IA32_XSS[bit 8]. Guests have to use RDMSR/WRMSR rather than
+                * XSAVES/XRSTORS to save/restore PT MSRs.
                 */
                if (data != 0)
                        return 1;