OSDN Git Service

KVM: x86/pmu: prepare the pmu event filter for masked events
author: Aaron Lewis <aaronlewis@google.com>
Tue, 20 Dec 2022 16:12:32 +0000 (16:12 +0000)
committer: Sean Christopherson <seanjc@google.com>
Tue, 24 Jan 2023 18:06:11 +0000 (10:06 -0800)
Refactor check_pmu_event_filter() in preparation for masked events.

No functional changes intended.

Signed-off-by: Aaron Lewis <aaronlewis@google.com>
Link: https://lore.kernel.org/r/20221220161236.555143-4-aaronlewis@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/pmu.c

index d29f639..8a734f4 100644 (file)
@@ -263,41 +263,51 @@ static int cmp_u64(const void *pa, const void *pb)
        return (a > b) - (a < b);
 }
 
+static u64 *find_filter_entry(struct kvm_pmu_event_filter *filter, u64 key)
+{
+       return bsearch(&key, filter->events, filter->nevents,
+                      sizeof(filter->events[0]), cmp_u64);
+}
+
+static bool is_gp_event_allowed(struct kvm_pmu_event_filter *filter, u64 eventsel)
+{
+       if (find_filter_entry(filter, eventsel & (kvm_pmu_ops.EVENTSEL_EVENT |
+                                                 ARCH_PERFMON_EVENTSEL_UMASK)))
+               return filter->action == KVM_PMU_EVENT_ALLOW;
+
+       return filter->action == KVM_PMU_EVENT_DENY;
+}
+
+static bool is_fixed_event_allowed(struct kvm_pmu_event_filter *filter, int idx)
+{
+       int fixed_idx = idx - INTEL_PMC_IDX_FIXED;
+
+       if (filter->action == KVM_PMU_EVENT_DENY &&
+           test_bit(fixed_idx, (ulong *)&filter->fixed_counter_bitmap))
+               return false;
+       if (filter->action == KVM_PMU_EVENT_ALLOW &&
+           !test_bit(fixed_idx, (ulong *)&filter->fixed_counter_bitmap))
+               return false;
+
+       return true;
+}
+
 static bool check_pmu_event_filter(struct kvm_pmc *pmc)
 {
        struct kvm_pmu_event_filter *filter;
        struct kvm *kvm = pmc->vcpu->kvm;
-       bool allow_event = true;
-       __u64 key;
-       int idx;
 
        if (!static_call(kvm_x86_pmu_hw_event_available)(pmc))
                return false;
 
        filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
        if (!filter)
-               goto out;
+               return true;
 
-       if (pmc_is_gp(pmc)) {
-               key = pmc->eventsel & (kvm_pmu_ops.EVENTSEL_EVENT |
-                                      ARCH_PERFMON_EVENTSEL_UMASK);
-               if (bsearch(&key, filter->events, filter->nevents,
-                           sizeof(__u64), cmp_u64))
-                       allow_event = filter->action == KVM_PMU_EVENT_ALLOW;
-               else
-                       allow_event = filter->action == KVM_PMU_EVENT_DENY;
-       } else {
-               idx = pmc->idx - INTEL_PMC_IDX_FIXED;
-               if (filter->action == KVM_PMU_EVENT_DENY &&
-                   test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
-                       allow_event = false;
-               if (filter->action == KVM_PMU_EVENT_ALLOW &&
-                   !test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
-                       allow_event = false;
-       }
+       if (pmc_is_gp(pmc))
+               return is_gp_event_allowed(filter, pmc->eventsel);
 
-out:
-       return allow_event;
+       return is_fixed_event_allowed(filter, pmc->idx);
 }
 
 static void reprogram_counter(struct kvm_pmc *pmc)