KVM: arm64: pmu: Don't mark a counter as chained if the odd one is disabled
Author:     Eric Auger <eric.auger@redhat.com>
AuthorDate: Fri, 24 Jan 2020 14:25:33 +0000 (15:25 +0100)
Commit:     Marc Zyngier <maz@kernel.org>
CommitDate: Tue, 28 Jan 2020 12:50:33 +0000 (12:50 +0000)
At the moment we only update the chain bitmap when the counter type
is set, which does not take the enable state of the odd register
into account.

Let's make sure a counter is never considered chained if
the high counter is disabled.

We recompute the chain state on enable/disable and type changes.

Also let kvm_pmu_create_perf_event() use the chain bitmap rather
than kvm_pmu_idx_has_chain_evtype().

Suggested-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20200124142535.29386-3-eric.auger@redhat.com
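
For readers skimming the patch, the invariant it enforces can be stated
compactly: a pair <even, odd> of counters behaves as a single chained
64bit counter only if the odd (high) counter is programmed with the
architectural CHAIN event (0x1E) and that counter is enabled. A minimal
userspace sketch of that predicate (toy structure and names, not the
kernel's; only the CHAIN event number and the even/odd pairing come from
the architecture):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ARMV8_PMUV3_PERFCTR_CHAIN 0x1e

struct toy_pmu {
    uint64_t cnten;        /* one enable bit per counter (PMCNTENSET) */
    uint16_t evtyper[32];  /* event selected on each counter */
};

static bool toy_pair_is_chained(const struct toy_pmu *pmu, unsigned int idx)
{
    unsigned int odd = idx | 1;  /* high counter of the pair */

    return pmu->evtyper[odd] == ARMV8_PMUV3_PERFCTR_CHAIN &&
           (pmu->cnten & (UINT64_C(1) << odd));
}

int main(void)
{
    struct toy_pmu pmu = { .evtyper = { [5] = ARMV8_PMUV3_PERFCTR_CHAIN } };

    printf("%d\n", toy_pair_is_chained(&pmu, 4));  /* 0: odd half disabled */
    pmu.cnten |= UINT64_C(1) << 5;                 /* guest enables counter 5 */
    printf("%d\n", toy_pair_is_chained(&pmu, 4));  /* 1: pair is now chained */
    return 0;
}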
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index c3f8b05..9f605e0 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -15,6 +15,8 @@
 #include <kvm/arm_vgic.h>
 
 static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
+static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx);
+static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);
 
 #define PERF_ATTR_CFG1_KVM_PMU_CHAINED 0x1
 
@@ -75,6 +77,13 @@ static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc)
 
        return pmc;
 }
+static struct kvm_pmc *kvm_pmu_get_alternate_pmc(struct kvm_pmc *pmc)
+{
+       if (kvm_pmu_idx_is_high_counter(pmc->idx))
+               return pmc - 1;
+       else
+               return pmc + 1;
+}
 
 /**
  * kvm_pmu_idx_has_chain_evtype - determine if the event type is chain
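
For context on the helper added above: counters are managed in even/odd
pairs, the even one being the canonical counter, so the alternate is
simply the other half of the pair. The pmc - 1 / pmc + 1 pointer
arithmetic works because the pmc entries sit in an array ordered by
counter index; on raw indices the same pairing is plain bit-0
arithmetic (hypothetical helper names, shown only to make the pairing
explicit):

static inline unsigned int canonical_idx(unsigned int idx)
{
    return idx & ~1u;  /* clear bit 0: 5 -> 4, 4 -> 4 */
}

static inline unsigned int alternate_idx(unsigned int idx)
{
    return idx ^ 1u;   /* flip bit 0: 4 <-> 5 */
}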
@@ -294,15 +303,9 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 
                pmc = &pmu->pmc[i];
 
-               /*
-                * For high counters of chained events we must recreate the
-                * perf event with the long (64bit) attribute set.
-                */
-               if (kvm_pmu_pmc_is_chained(pmc) &&
-                   kvm_pmu_idx_is_high_counter(i)) {
-                       kvm_pmu_create_perf_event(vcpu, i);
-                       continue;
-               }
+               /* A change in the enable state may affect the chain state */
+               kvm_pmu_update_pmc_chained(vcpu, i);
+               kvm_pmu_create_perf_event(vcpu, i);
 
                /* At this point, pmc must be the canonical */
                if (pmc->perf_event) {
@@ -335,15 +338,9 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 
                pmc = &pmu->pmc[i];
 
-               /*
-                * For high counters of chained events we must recreate the
-                * perf event with the long (64bit) attribute unset.
-                */
-               if (kvm_pmu_pmc_is_chained(pmc) &&
-                   kvm_pmu_idx_is_high_counter(i)) {
-                       kvm_pmu_create_perf_event(vcpu, i);
-                       continue;
-               }
+               /* A change in the enable state may affect the chain state */
+               kvm_pmu_update_pmc_chained(vcpu, i);
+               kvm_pmu_create_perf_event(vcpu, i);
 
                /* At this point, pmc must be the canonical */
                if (pmc->perf_event)
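
Both hunks above replace the old special case (recreate the event only
for the high half of a chained pair) with the same two-step refresh:
recompute the chain state, then recreate the perf event. If one wanted
to make that symmetry explicit, the pattern could be factored into a
helper along these lines (a hypothetical sketch using the patch's own
functions, not something the patch adds):

static void toy_refresh_counter(struct kvm_vcpu *vcpu, u64 i)
{
    /* A change in the enable state may affect the chain state */
    kvm_pmu_update_pmc_chained(vcpu, i);
    kvm_pmu_create_perf_event(vcpu, i);
}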
@@ -585,15 +582,14 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
 
        counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
 
-       if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
+       if (kvm_pmu_pmc_is_chained(pmc)) {
                /**
                 * The initial sample period (overflow count) of an event. For
                 * chained counters we only support overflow interrupts on the
                 * high counter.
                 */
                attr.sample_period = (-counter) & GENMASK(63, 0);
-               if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
-                       attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
+               attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
 
                event = perf_event_create_kernel_counter(&attr, -1, current,
                                                         kvm_pmu_perf_overflow,
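
A note on the sample_period line kept above: a chained pair presents a
single 64bit counter to the guest, so the distance to overflow from a
current value counter is 2^64 - counter, which unsigned two's-complement
arithmetic yields as (-counter); GENMASK(63, 0) is all-ones at 64 bits,
so the mask merely documents the width. A standalone check of that
arithmetic:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t counter = 0xfffffffffffffff0ULL;  /* 16 ticks below overflow */
    uint64_t sample_period = -counter;         /* == 2^64 - counter (mod 2^64) */

    printf("%" PRIu64 "\n", sample_period);    /* prints 16 */
    return 0;
}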
@@ -624,25 +620,33 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
  * @select_idx: The number of selected counter
  *
  * Update the chained bitmap based on the event type written in the
- * typer register.
+ * typer register and the enable state of the odd register.
  */
 static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx)
 {
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
-       struct kvm_pmc *pmc = &pmu->pmc[select_idx];
+       struct kvm_pmc *pmc = &pmu->pmc[select_idx], *canonical_pmc;
+       bool new_state, old_state;
+
+       old_state = kvm_pmu_pmc_is_chained(pmc);
+       new_state = kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx) &&
+                   kvm_pmu_counter_is_enabled(vcpu, pmc->idx | 0x1);
 
-       if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
+       if (old_state == new_state)
+               return;
+
+       canonical_pmc = kvm_pmu_get_canonical_pmc(pmc);
+       kvm_pmu_stop_counter(vcpu, canonical_pmc);
+       if (new_state) {
                /*
                 * During promotion from !chained to chained we must ensure
                 * the adjacent counter is stopped and its event destroyed
                 */
-               if (!kvm_pmu_pmc_is_chained(pmc))
-                       kvm_pmu_stop_counter(vcpu, pmc);
-
+               kvm_pmu_stop_counter(vcpu, kvm_pmu_get_alternate_pmc(pmc));
                set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
-       } else {
-               clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
+               return;
        }
+       clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
 }
 
 /**
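
Putting the rework together: kvm_pmu_update_pmc_chained() now computes
the would-be chain state, bails out early when nothing changed, and
otherwise stops the affected counters before flipping the bit, so that
the subsequent kvm_pmu_create_perf_event() call recreates the backing
event with the right width. A compilable userspace model of that
transition logic (toy types and names, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

struct toy_pair {
    bool chained;       /* current chain state */
    bool chain_evtype;  /* odd counter programmed with CHAIN */
    bool odd_enabled;   /* odd counter enabled */
};

static void toy_update_chained(struct toy_pair *p)
{
    bool new_state = p->chain_evtype && p->odd_enabled;

    if (p->chained == new_state)
        return;  /* no transition: leave the backing events alone */

    /* this is where the kernel stops/destroys the old perf events */
    p->chained = new_state;
    printf("pair is now %s\n", new_state ? "chained" : "split");
}

int main(void)
{
    struct toy_pair p = { .chain_evtype = true };

    toy_update_chained(&p);  /* odd half disabled: stays split, no output */
    p.odd_enabled = true;
    toy_update_chained(&p);  /* "pair is now chained" */
    p.odd_enabled = false;
    toy_update_chained(&p);  /* "pair is now split" */
    return 0;
}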