
Merge remote-tracking branch 'arm64/for-next/sysregs' into kvmarm-master/next
author     Marc Zyngier <maz@kernel.org>
           Mon, 5 Dec 2022 14:34:32 +0000 (14:34 +0000)
committer  Marc Zyngier <maz@kernel.org>
           Mon, 5 Dec 2022 14:39:53 +0000 (14:39 +0000)
Merge arm64's sysreg repainting branch to avoid too many
ugly conflicts...

Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kernel/cpufeature.c
arch/arm64/kvm/sys_regs.c

arch/arm64/kernel/cpufeature.c: Simple merge

diff --cc arch/arm64/kvm/sys_regs.c
@@@ -1060,40 -1062,6 +1060,40 @@@ static bool access_arch_timer(struct kv
        return true;
  }
  
-       case ID_DFR0_PERFMON_8_0:
 +static u8 vcpu_pmuver(const struct kvm_vcpu *vcpu)
 +{
 +      if (kvm_vcpu_has_pmu(vcpu))
 +              return vcpu->kvm->arch.dfr0_pmuver.imp;
 +
 +      return vcpu->kvm->arch.dfr0_pmuver.unimp;
 +}
 +
 +static u8 perfmon_to_pmuver(u8 perfmon)
 +{
 +      switch (perfmon) {
-       case ID_DFR0_PERFMON_IMP_DEF:
++      case ID_DFR0_EL1_PerfMon_PMUv3:
 +              return ID_AA64DFR0_EL1_PMUVer_IMP;
-               return ID_DFR0_PERFMON_8_0;
++      case ID_DFR0_EL1_PerfMon_IMPDEF:
 +              return ID_AA64DFR0_EL1_PMUVer_IMP_DEF;
 +      default:
 +              /* Anything ARMv8.1+ and NI have the same value. For now. */
 +              return perfmon;
 +      }
 +}
 +
 +static u8 pmuver_to_perfmon(u8 pmuver)
 +{
 +      switch (pmuver) {
 +      case ID_AA64DFR0_EL1_PMUVer_IMP:
-               return ID_DFR0_PERFMON_IMP_DEF;
++              return ID_DFR0_EL1_PerfMon_PMUv3;
 +      case ID_AA64DFR0_EL1_PMUVer_IMP_DEF:
++              return ID_DFR0_EL1_PerfMon_IMPDEF;
 +      default:
 +              /* Anything ARMv8.1+ and NI have the same value. For now. */
 +              return pmuver;
 +      }
 +}
 +
  /* Read a sanitised cpufeature ID register by sys_reg_desc */
  static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r)
  {
                val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer);
                break;
        case SYS_ID_DFR0_EL1:
-               val &= ~ARM64_FEATURE_MASK(ID_DFR0_PERFMON);
-               val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_DFR0_PERFMON),
 -              /* Limit guests to PMUv3 for ARMv8.4 */
 -              val = cpuid_feature_cap_perfmon_field(val,
 -                                                    ID_DFR0_EL1_PerfMon_SHIFT,
 -                                                    kvm_vcpu_has_pmu(vcpu) ? ID_DFR0_EL1_PerfMon_PMUv3p4 : 0);
++              val &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon);
++              val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon),
 +                                pmuver_to_perfmon(vcpu_pmuver(vcpu)));
                break;
        }
  
@@@ -1253,85 -1222,6 +1253,85 @@@ static int set_id_aa64pfr0_el1(struct k
        return 0;
  }
  
-       perfmon = FIELD_GET(ARM64_FEATURE_MASK(ID_DFR0_PERFMON), val);
-       if ((perfmon != ID_DFR0_PERFMON_IMP_DEF && perfmon > host_perfmon) ||
-           (perfmon != 0 && perfmon < ID_DFR0_PERFMON_8_0))
 +static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
 +                             const struct sys_reg_desc *rd,
 +                             u64 val)
 +{
 +      u8 pmuver, host_pmuver;
 +      bool valid_pmu;
 +
 +      host_pmuver = kvm_arm_pmu_get_pmuver_limit();
 +
 +      /*
 +       * Allow AA64DFR0_EL1.PMUver to be set from userspace as long
 +       * as it doesn't promise more than what the HW gives us. We
 +       * allow an IMPDEF PMU though, only if no PMU is supported
 +       * (KVM backward compatibility handling).
 +       */
 +      pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), val);
 +      if ((pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF && pmuver > host_pmuver))
 +              return -EINVAL;
 +
 +      valid_pmu = (pmuver != 0 && pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF);
 +
 +      /* Make sure view register and PMU support do match */
 +      if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
 +              return -EINVAL;
 +
 +      /* We can only differ with PMUver, and anything else is an error */
 +      val ^= read_id_reg(vcpu, rd);
 +      val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
 +      if (val)
 +              return -EINVAL;
 +
 +      if (valid_pmu)
 +              vcpu->kvm->arch.dfr0_pmuver.imp = pmuver;
 +      else
 +              vcpu->kvm->arch.dfr0_pmuver.unimp = pmuver;
 +
 +      return 0;
 +}
 +
 +static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
 +                         const struct sys_reg_desc *rd,
 +                         u64 val)
 +{
 +      u8 perfmon, host_perfmon;
 +      bool valid_pmu;
 +
 +      host_perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
 +
 +      /*
 +       * Allow DFR0_EL1.PerfMon to be set from userspace as long as
 +       * it doesn't promise more than what the HW gives us on the
 +       * AArch64 side (as everything is emulated with that), and
 +       * that this is a PMUv3.
 +       */
-       valid_pmu = (perfmon != 0 && perfmon != ID_DFR0_PERFMON_IMP_DEF);
++      perfmon = FIELD_GET(ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon), val);
++      if ((perfmon != ID_DFR0_EL1_PerfMon_IMPDEF && perfmon > host_perfmon) ||
++          (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3))
 +              return -EINVAL;
 +
-       val &= ~ARM64_FEATURE_MASK(ID_DFR0_PERFMON);
++      valid_pmu = (perfmon != 0 && perfmon != ID_DFR0_EL1_PerfMon_IMPDEF);
 +
 +      /* Make sure view register and PMU support do match */
 +      if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
 +              return -EINVAL;
 +
 +      /* We can only differ with PerfMon, and anything else is an error */
 +      val ^= read_id_reg(vcpu, rd);
++      val &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon);
 +      if (val)
 +              return -EINVAL;
 +
 +      if (valid_pmu)
 +              vcpu->kvm->arch.dfr0_pmuver.imp = perfmon_to_pmuver(perfmon);
 +      else
 +              vcpu->kvm->arch.dfr0_pmuver.unimp = perfmon_to_pmuver(perfmon);
 +
 +      return 0;
 +}
 +
  /*
   * cpufeature ID register user accessors
   *