KVM: arm64: PMU: Do not let AArch32 change the counters' top 32 bits
author    Marc Zyngier <maz@kernel.org>
          Sun, 13 Nov 2022 16:38:25 +0000 (16:38 +0000)
committer Marc Zyngier <maz@kernel.org>
          Sat, 19 Nov 2022 12:43:47 +0000 (12:43 +0000)
Even when using PMUv3p5 (which implies 64bit counters), there is
no way for AArch32 to write to the top 32 bits of the counters.
The only way to influence these bits (other than by counting
events) is by writing PMCR.P==1.

Make sure we obey the architecture and preserve the top 32 bits
on a counter update.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221113163832.3154370-10-maz@kernel.org
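
As a standalone illustration (not the kernel code itself; the helper name
and types below are made up for the example), the rule the patch enforces
boils down to one masking step: a 32-bit guest write replaces only the low
32 bits of a 64-bit PMUv3p5 counter, and only a forced PMCR.P reset may
clear the top half.

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Sketch of the update rule: preserve the top 32 bits on an
     * AArch32 write, unless the write is forced by PMCR.P, which
     * zeroes the whole counter.
     */
    static uint64_t counter_write(uint64_t old, uint64_t val,
                                  int aarch32, int force)
    {
            if (aarch32 && !force)
                    return (old & 0xffffffff00000000ULL) | (uint32_t)val;
            return val;
    }

    int main(void)
    {
            uint64_t c = 0xdeadbeef00000010ULL;

            /* AArch32 write of 0x42: the top 32 bits survive */
            printf("%016llx\n",
                   (unsigned long long)counter_write(c, 0x42, 1, 0));
            /* PMCR.P: the counter is reset to 0 */
            printf("%016llx\n",
                   (unsigned long long)counter_write(c, 0, 1, 1));
            return 0;
    }
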
arch/arm64/kvm/pmu-emul.c

index ea0c841..7a945fa 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -119,13 +119,8 @@ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
        return counter;
 }
 
-/**
- * kvm_pmu_set_counter_value - set PMU counter value
- * @vcpu: The vcpu pointer
- * @select_idx: The counter index
- * @val: The counter value
- */
-void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
+static void kvm_pmu_set_counter(struct kvm_vcpu *vcpu, u64 select_idx, u64 val,
+                               bool force)
 {
        u64 reg;
 
@@ -135,6 +130,19 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
        kvm_pmu_release_perf_event(&vcpu->arch.pmu.pmc[select_idx]);
 
        reg = counter_index_to_reg(select_idx);
+
+       if (vcpu_mode_is_32bit(vcpu) && select_idx != ARMV8_PMU_CYCLE_IDX &&
+           !force) {
+               /*
+                * Even with PMUv3p5, AArch32 cannot write to the top
+                * 32bit of the counters. The only possible course of
+                * action is to use PMCR.P, which will reset them to
+                * 0 (the only use of the 'force' parameter).
+                */
+               val = (__vcpu_sys_reg(vcpu, reg) & GENMASK(63, 32)) |
+                     lower_32_bits(val);
+       }
+
        __vcpu_sys_reg(vcpu, reg) = val;
 
        /* Recreate the perf event to reflect the updated sample_period */
@@ -142,6 +150,17 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
 }
 
 /**
+ * kvm_pmu_set_counter_value - set PMU counter value
+ * @vcpu: The vcpu pointer
+ * @select_idx: The counter index
+ * @val: The counter value
+ */
+void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
+{
+       kvm_pmu_set_counter(vcpu, select_idx, val, false);
+}
+
+/**
  * kvm_pmu_release_perf_event - remove the perf event
  * @pmc: The PMU counter pointer
  */
@@ -533,7 +552,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
                unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
                mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
                for_each_set_bit(i, &mask, 32)
-                       kvm_pmu_set_counter_value(vcpu, i, 0);
+                       kvm_pmu_set_counter(vcpu, i, 0, true);
        }
 }
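
For context, PMCR.P resets every implemented event counter to zero but
leaves the cycle counter (PMCCNTR, index 31 in PMUv3) untouched, which is
why the hunk above masks out ARMV8_PMU_CYCLE_IDX before walking the bitmap.
A standalone sketch of that walk (plain C, not the kernel's bitmap API):

    #include <stdint.h>
    #include <stdio.h>

    #define CYCLE_IDX 31

    int main(void)
    {
            uint64_t counters[32] = { 0 };
            uint32_t mask;
            int i;

            counters[0] = 0x1111;
            counters[1] = 0x2222;
            counters[CYCLE_IDX] = 0x9999;

            /* pretend counters 0, 1 and the cycle counter are implemented */
            mask = (1u << 0) | (1u << 1) | (1u << CYCLE_IDX);

            /* PMCR.P does not touch the cycle counter */
            mask &= ~(1u << CYCLE_IDX);

            for (i = 0; i < 32; i++)
                    if (mask & (1u << i))
                            counters[i] = 0; /* the forced reset path */

            printf("evt0=%llx evt1=%llx cycle=%llx\n",
                   (unsigned long long)counters[0],
                   (unsigned long long)counters[1],
                   (unsigned long long)counters[CYCLE_IDX]);
            return 0;
    }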