KVM: x86/pmu: Use binary search to check filtered events
author Jim Mattson <jmattson@google.com>
Sat, 15 Jan 2022 05:24:26 +0000 (21:24 -0800)
committer Paolo Bonzini <pbonzini@redhat.com>
Wed, 19 Jan 2022 17:11:26 +0000 (12:11 -0500)
The PMU event filter may contain up to 300 events. Replace the linear
search in reprogram_gp_counter() with a binary search.
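For scale: a linear scan of a 300-event filter costs up to 300 comparisons
per counter reprogram, while a binary search needs at most ceil(log2(300)) = 9.
Below is a minimal userspace sketch of the same sort-once, search-many pattern,
using libc qsort()/bsearch(); the event values are made up for illustration and
are not taken from the kernel sources:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Three-way u64 compare; avoids truncating a 64-bit difference to int. */
static int cmp_u64(const void *a, const void *b)
{
	uint64_t lhs = *(const uint64_t *)a;
	uint64_t rhs = *(const uint64_t *)b;

	return (lhs > rhs) - (lhs < rhs);
}

int main(void)
{
	/* Stand-in for the filter's event list (up to 300 entries in KVM). */
	uint64_t events[] = { 0x1003c, 0xc0, 0x3c, 0x10000c4 };
	size_t nevents = sizeof(events) / sizeof(events[0]);
	uint64_t key = 0xc0;	/* stand-in for eventsel & mask */

	/* Sort once when the filter is installed... */
	qsort(events, nevents, sizeof(events[0]), cmp_u64);

	/* ...then each lookup is O(log n) instead of O(n). */
	if (bsearch(&key, events, nevents, sizeof(events[0]), cmp_u64))
		printf("event %#llx is in the filter\n",
		       (unsigned long long)key);
	return 0;
}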

Signed-off-by: Jim Mattson <jmattson@google.com>
Message-Id: <20220115052431.447232-2-jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/pmu.c

index e632693..2c98f3e 100644
@@ -13,6 +13,8 @@
 #include <linux/types.h>
 #include <linux/kvm_host.h>
 #include <linux/perf_event.h>
+#include <linux/bsearch.h>
+#include <linux/sort.h>
 #include <asm/perf_event.h>
 #include "x86.h"
 #include "cpuid.h"
@@ -172,12 +174,16 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)
        return true;
 }
 
+static int cmp_u64(const void *a, const void *b)
+{
+       return *(__u64 *)a - *(__u64 *)b;
+}
+
 void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 {
        unsigned config, type = PERF_TYPE_RAW;
        struct kvm *kvm = pmc->vcpu->kvm;
        struct kvm_pmu_event_filter *filter;
-       int i;
        bool allow_event = true;
 
        if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
@@ -192,16 +198,13 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 
        filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
        if (filter) {
-               for (i = 0; i < filter->nevents; i++)
-                       if (filter->events[i] ==
-                           (eventsel & AMD64_RAW_EVENT_MASK_NB))
-                               break;
-               if (filter->action == KVM_PMU_EVENT_ALLOW &&
-                   i == filter->nevents)
-                       allow_event = false;
-               if (filter->action == KVM_PMU_EVENT_DENY &&
-                   i < filter->nevents)
-                       allow_event = false;
+               __u64 key = eventsel & AMD64_RAW_EVENT_MASK_NB;
+
+               if (bsearch(&key, filter->events, filter->nevents,
+                           sizeof(__u64), cmp_u64))
+                       allow_event = filter->action == KVM_PMU_EVENT_ALLOW;
+               else
+                       allow_event = filter->action == KVM_PMU_EVENT_DENY;
        }
        if (!allow_event)
                return;
@@ -576,6 +579,11 @@ int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
        /* Ensure nevents can't be changed between the user copies. */
        *filter = tmp;
 
+       /*
+        * Sort the in-kernel list so that we can search it with bsearch.
+        */
+       sort(&filter->events, filter->nevents, sizeof(__u64), cmp_u64, NULL);
+
        mutex_lock(&kvm->lock);
        filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter,
                                     mutex_is_locked(&kvm->lock));
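
One subtlety in cmp_u64() above: it truncates a 64-bit difference to int, so
two keys that differ only in bits 63:32 compare as equal (e.g. 0 and
1ULL << 32), and the sign of the result can be wrong even when the truncated
value is nonzero. Since AMD64_RAW_EVENT_MASK_NB keeps event-select bits above
bit 31, those bits are significant for this key. A minimal overflow-safe
sketch of the comparator, assuming the same sort()/bsearch() call sites as in
the patch:

static int cmp_u64(const void *a, const void *b)
{
	u64 lhs = *(const u64 *)a;
	u64 rhs = *(const u64 *)b;

	/* Yields exactly -1, 0, or 1 without computing a u64 difference. */
	return (lhs > rhs) - (lhs < rhs);
}

The (lhs > rhs) - (lhs < rhs) idiom gives a valid three-way ordering for any
pair of u64 values, which both sort() and bsearch() require.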