Merge 4.4.160 into android-4.4-p
[sagit-ice-cold/kernel_xiaomi_msm8998.git]
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index c1eddc0..847c700 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -23,6 +23,7 @@
 #include <linux/sort.h>
 #include <linux/stop_machine.h>
 #include <linux/types.h>
+#include <linux/mm.h>
 #include <asm/cpu.h>
 #include <asm/cpufeature.h>
 #include <asm/cpu_ops.h>
@@ -45,6 +46,7 @@ unsigned int compat_elf_hwcap2 __read_mostly;
 #endif
 
 DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
+EXPORT_SYMBOL(cpu_hwcaps);
 
 #define __ARM64_FTR_BITS(SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
        {                                               \
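The new export matters because cpus_have_cap() is a static inline in <asm/cpufeature.h> that tests the cpu_hwcaps bitmap directly, so the reference to the bitmap is emitted in every caller, including loadable modules. A sketch of that inline as it exists in this era of the kernel (reproduced from memory, so treat the exact body as an assumption):

    static inline bool cpus_have_cap(unsigned int num)
    {
            if (num >= ARM64_NCAPS)
                    return false;
            return test_bit(num, cpu_hwcaps);
    }

Without EXPORT_SYMBOL(cpu_hwcaps), a module calling cpus_have_cap() would fail to resolve the symbol at load time.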
@@ -69,6 +71,10 @@ DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
                .width = 0,                             \
        }
 
+/* meta feature for alternatives */
+static bool __maybe_unused
+cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry);
+
 static struct arm64_ftr_bits ftr_id_aa64isar0[] = {
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
@@ -125,6 +131,11 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
        ARM64_FTR_END,
 };
 
+static struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
+       ARM64_FTR_END,
+};
+
 static struct arm64_ftr_bits ftr_ctr[] = {
        U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1),      /* RAO */
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0),
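The new ftr_id_aa64mmfr2 table teaches the sanitisation code about ID_AA64MMFR2_EL1: the UAO field is a strict, exact-match 4-bit field whose safe fallback value is 0 (feature absent). Conceptually, each ARM64_FTR_BITS(strictness, type, shift, width, safe) entry just names a bitfield, and extracting one is plain shift-and-mask. A self-contained sketch, where ftr_field() is a hypothetical stand-in for the kernel's cpuid_feature_extract_field():

    #include <stdint.h>

    /* Pull the field at 'shift' of size 'width' (< 64 bits) out of a
     * 64-bit ID register value, as an arm64_ftr_bits entry describes it. */
    static inline uint64_t ftr_field(uint64_t reg, unsigned int shift,
                                     unsigned int width)
    {
            return (reg >> shift) & ((1ULL << width) - 1);
    }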
@@ -286,6 +297,7 @@ static struct arm64_ftr_reg arm64_ftr_regs[] = {
        /* Op1 = 0, CRn = 0, CRm = 7 */
        ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
        ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
+       ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),
 
        /* Op1 = 3, CRn = 0, CRm = 0 */
        ARM64_FTR_REG(SYS_CTR_EL0, ftr_ctr),
@@ -410,6 +422,7 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
        init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
        init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
        init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
+       init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
        init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
        init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
        init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
@@ -519,6 +532,8 @@ void update_cpu_features(int cpu,
                                      info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
        taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
                                      info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
+       taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
+                                     info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);
 
        /*
         * EL3 is not our concern.
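check_update_ftr_reg() folds this CPU's copy of the register into the system-wide sanitised value and reports whether any strict field differs from the boot CPU, so a mismatch in the new ID_AA64MMFR2_EL1 now taints the kernel like the other ID registers. Roughly (simplified from the definition earlier in this file; details are from memory):

    static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
    {
            struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);

            update_cpu_ftr_reg(regp, val);      /* fold into sanitised value */
            if ((boot & regp->strict_mask) == (val & regp->strict_mask))
                    return 0;
            pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
                    regp->name, boot, cpu, val);
            return 1;                           /* caller ORs this into 'taint' */
    }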
@@ -623,6 +638,51 @@ static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry)
        return has_sre;
 }
 
+static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry)
+{
+       u32 midr = read_cpuid_id();
+       u32 rv_min, rv_max;
+
+       /* Cavium ThunderX pass 1.x and 2.x */
+       rv_min = 0;
+       rv_max = (1 << MIDR_VARIANT_SHIFT) | MIDR_REVISION_MASK;
+
+       return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX, rv_min, rv_max);
+}
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
+
+static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry)
+{
+       /* Forced on command line? */
+       if (__kpti_forced) {
+               pr_info_once("kernel page table isolation forced %s by command line option\n",
+                            __kpti_forced > 0 ? "ON" : "OFF");
+               return __kpti_forced > 0;
+       }
+
+       /* Useful for KASLR robustness */
+       if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+               return true;
+
+       return false;
+}
+
+static int __init parse_kpti(char *str)
+{
+       bool enabled;
+       int ret = strtobool(str, &enabled);
+
+       if (ret)
+               return ret;
+
+       __kpti_forced = enabled ? 1 : -1;
+       return 0;
+}
+__setup("kpti=", parse_kpti);
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
        {
                .desc = "GIC system register CPU interface",
@@ -654,6 +714,34 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
        },
 #endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
        {
+               .desc = "Software prefetching using PRFM",
+               .capability = ARM64_HAS_NO_HW_PREFETCH,
+               .matches = has_no_hw_prefetch,
+       },
+#ifdef CONFIG_ARM64_UAO
+       {
+               .desc = "User Access Override",
+               .capability = ARM64_HAS_UAO,
+               .matches = has_cpuid_feature,
+               .sys_reg = SYS_ID_AA64MMFR2_EL1,
+               .field_pos = ID_AA64MMFR2_UAO_SHIFT,
+               .min_field_value = 1,
+               .enable = cpu_enable_uao,
+       },
+#endif /* CONFIG_ARM64_UAO */
+#ifdef CONFIG_ARM64_PAN
+       {
+               .capability = ARM64_ALT_PAN_NOT_UAO,
+               .matches = cpufeature_pan_not_uao,
+       },
+#endif /* CONFIG_ARM64_PAN */
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+       {
+               .capability = ARM64_UNMAP_KERNEL_AT_EL0,
+               .matches = unmap_kernel_at_el0,
+       },
+#endif
+       {
                .desc = "32-bit EL0 Support",
                .capability = ARM64_HAS_32BIT_EL0,
                .matches = has_cpuid_feature,
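The UAO entry is the only new one that relies on the generic CPUID matcher: has_cpuid_feature() reads the system-wide sanitised copy of .sys_reg and requires the field at .field_pos to reach .min_field_value. A hedged sketch of that matcher (the real one lives earlier in this file; names from memory):

    static bool has_cpuid_feature(const struct arm64_cpu_capabilities *entry)
    {
            u64 val = read_system_reg(entry->sys_reg);  /* sanitised value */
            int field = cpuid_feature_extract_field(val, entry->field_pos);

            return field >= entry->min_field_value;
    }

The ARM64_ALT_PAN_NOT_UAO and ARM64_UNMAP_KERNEL_AT_EL0 entries instead supply custom .matches callbacks and deliberately carry no .desc, which is why the iteration changes below are needed.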
@@ -694,7 +782,7 @@ static const struct arm64_cpu_capabilities arm64_hwcaps[] = {
        {},
 };
 
-static void cap_set_hwcap(const struct arm64_cpu_capabilities *cap)
+static void __init cap_set_hwcap(const struct arm64_cpu_capabilities *cap)
 {
        switch (cap->hwcap_type) {
        case CAP_HWCAP:
@@ -739,12 +827,12 @@ static bool __maybe_unused cpus_have_hwcap(const struct arm64_cpu_capabilities *
        return rc;
 }
 
-static void setup_cpu_hwcaps(void)
+static void __init setup_cpu_hwcaps(void)
 {
        int i;
        const struct arm64_cpu_capabilities *hwcaps = arm64_hwcaps;
 
-       for (i = 0; hwcaps[i].desc; i++)
+       for (i = 0; hwcaps[i].matches; i++)
                if (hwcaps[i].matches(&hwcaps[i]))
                        cap_set_hwcap(&hwcaps[i]);
 }
@@ -754,11 +842,11 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
 {
        int i;
 
-       for (i = 0; caps[i].desc; i++) {
+       for (i = 0; caps[i].matches; i++) {
                if (!caps[i].matches(&caps[i]))
                        continue;
 
-               if (!cpus_have_cap(caps[i].capability))
+               if (!cpus_have_cap(caps[i].capability) && caps[i].desc)
                        pr_info("%s %s\n", info, caps[i].desc);
                cpus_set_cap(caps[i].capability);
        }
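Both capability tables end in an empty {} sentinel, so these loops must terminate on a field that every real entry sets. .desc is now optional (the ARM64_ALT_PAN_NOT_UAO and ARM64_UNMAP_KERNEL_AT_EL0 entries above have none) while .matches is mandatory, hence the new loop condition; the added "&& caps[i].desc" guard keeps those anonymous capabilities from logging "detected feature: (null)".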
@@ -768,11 +856,12 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
  * Run through the enabled capabilities and call enable() on all
  * active CPUs.
  */
-static void enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
+static void __init
+enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
 {
        int i;
 
-       for (i = 0; caps[i].desc; i++)
+       for (i = 0; caps[i].matches; i++)
                if (caps[i].enable && cpus_have_cap(caps[i].capability))
                        /*
                         * Use stop_machine() as it schedules the work allowing
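The comment here is cut off by the hunk boundary; the statement it guards is, in mainline (hedged reconstruction):

    stop_machine(caps[i].enable, NULL, cpu_online_mask);

i.e. each enable() hook runs on every online CPU inside stop_machine() rather than via an on_each_cpu() IPI, because a PSTATE change (PAN, UAO) made in an IPI handler would vanish when the interrupted context's PSTATE is restored on exception return.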
@@ -806,35 +895,36 @@ static inline void set_sys_caps_initialised(void)
 static u64 __raw_read_system_reg(u32 sys_id)
 {
        switch (sys_id) {
-       case SYS_ID_PFR0_EL1:           return (u64)read_cpuid(ID_PFR0_EL1);
-       case SYS_ID_PFR1_EL1:           return (u64)read_cpuid(ID_PFR1_EL1);
-       case SYS_ID_DFR0_EL1:           return (u64)read_cpuid(ID_DFR0_EL1);
-       case SYS_ID_MMFR0_EL1:          return (u64)read_cpuid(ID_MMFR0_EL1);
-       case SYS_ID_MMFR1_EL1:          return (u64)read_cpuid(ID_MMFR1_EL1);
-       case SYS_ID_MMFR2_EL1:          return (u64)read_cpuid(ID_MMFR2_EL1);
-       case SYS_ID_MMFR3_EL1:          return (u64)read_cpuid(ID_MMFR3_EL1);
-       case SYS_ID_ISAR0_EL1:          return (u64)read_cpuid(ID_ISAR0_EL1);
-       case SYS_ID_ISAR1_EL1:          return (u64)read_cpuid(ID_ISAR1_EL1);
-       case SYS_ID_ISAR2_EL1:          return (u64)read_cpuid(ID_ISAR2_EL1);
-       case SYS_ID_ISAR3_EL1:          return (u64)read_cpuid(ID_ISAR3_EL1);
-       case SYS_ID_ISAR4_EL1:          return (u64)read_cpuid(ID_ISAR4_EL1);
-       case SYS_ID_ISAR5_EL1:          return (u64)read_cpuid(ID_ISAR4_EL1);
-       case SYS_MVFR0_EL1:             return (u64)read_cpuid(MVFR0_EL1);
-       case SYS_MVFR1_EL1:             return (u64)read_cpuid(MVFR1_EL1);
-       case SYS_MVFR2_EL1:             return (u64)read_cpuid(MVFR2_EL1);
-
-       case SYS_ID_AA64PFR0_EL1:       return (u64)read_cpuid(ID_AA64PFR0_EL1);
-       case SYS_ID_AA64PFR1_EL1:       return (u64)read_cpuid(ID_AA64PFR0_EL1);
-       case SYS_ID_AA64DFR0_EL1:       return (u64)read_cpuid(ID_AA64DFR0_EL1);
-       case SYS_ID_AA64DFR1_EL1:       return (u64)read_cpuid(ID_AA64DFR0_EL1);
-       case SYS_ID_AA64MMFR0_EL1:      return (u64)read_cpuid(ID_AA64MMFR0_EL1);
-       case SYS_ID_AA64MMFR1_EL1:      return (u64)read_cpuid(ID_AA64MMFR1_EL1);
-       case SYS_ID_AA64ISAR0_EL1:      return (u64)read_cpuid(ID_AA64ISAR0_EL1);
-       case SYS_ID_AA64ISAR1_EL1:      return (u64)read_cpuid(ID_AA64ISAR1_EL1);
-
-       case SYS_CNTFRQ_EL0:            return (u64)read_cpuid(CNTFRQ_EL0);
-       case SYS_CTR_EL0:               return (u64)read_cpuid(CTR_EL0);
-       case SYS_DCZID_EL0:             return (u64)read_cpuid(DCZID_EL0);
+       case SYS_ID_PFR0_EL1:           return read_cpuid(SYS_ID_PFR0_EL1);
+       case SYS_ID_PFR1_EL1:           return read_cpuid(SYS_ID_PFR1_EL1);
+       case SYS_ID_DFR0_EL1:           return read_cpuid(SYS_ID_DFR0_EL1);
+       case SYS_ID_MMFR0_EL1:          return read_cpuid(SYS_ID_MMFR0_EL1);
+       case SYS_ID_MMFR1_EL1:          return read_cpuid(SYS_ID_MMFR1_EL1);
+       case SYS_ID_MMFR2_EL1:          return read_cpuid(SYS_ID_MMFR2_EL1);
+       case SYS_ID_MMFR3_EL1:          return read_cpuid(SYS_ID_MMFR3_EL1);
+       case SYS_ID_ISAR0_EL1:          return read_cpuid(SYS_ID_ISAR0_EL1);
+       case SYS_ID_ISAR1_EL1:          return read_cpuid(SYS_ID_ISAR1_EL1);
+       case SYS_ID_ISAR2_EL1:          return read_cpuid(SYS_ID_ISAR2_EL1);
+       case SYS_ID_ISAR3_EL1:          return read_cpuid(SYS_ID_ISAR3_EL1);
+       case SYS_ID_ISAR4_EL1:          return read_cpuid(SYS_ID_ISAR4_EL1);
+       case SYS_ID_ISAR5_EL1:          return read_cpuid(SYS_ID_ISAR5_EL1);
+       case SYS_MVFR0_EL1:             return read_cpuid(SYS_MVFR0_EL1);
+       case SYS_MVFR1_EL1:             return read_cpuid(SYS_MVFR1_EL1);
+       case SYS_MVFR2_EL1:             return read_cpuid(SYS_MVFR2_EL1);
+
+       case SYS_ID_AA64PFR0_EL1:       return read_cpuid(SYS_ID_AA64PFR0_EL1);
+       case SYS_ID_AA64PFR1_EL1:       return read_cpuid(SYS_ID_AA64PFR1_EL1);
+       case SYS_ID_AA64DFR0_EL1:       return read_cpuid(SYS_ID_AA64DFR0_EL1);
+       case SYS_ID_AA64DFR1_EL1:       return read_cpuid(SYS_ID_AA64DFR1_EL1);
+       case SYS_ID_AA64MMFR0_EL1:      return read_cpuid(SYS_ID_AA64MMFR0_EL1);
+       case SYS_ID_AA64MMFR1_EL1:      return read_cpuid(SYS_ID_AA64MMFR1_EL1);
+       case SYS_ID_AA64MMFR2_EL1:      return read_cpuid(SYS_ID_AA64MMFR2_EL1);
+       case SYS_ID_AA64ISAR0_EL1:      return read_cpuid(SYS_ID_AA64ISAR0_EL1);
+       case SYS_ID_AA64ISAR1_EL1:      return read_cpuid(SYS_ID_AA64ISAR1_EL1);
+
+       case SYS_CNTFRQ_EL0:            return read_cpuid(SYS_CNTFRQ_EL0);
+       case SYS_CTR_EL0:               return read_cpuid(SYS_CTR_EL0);
+       case SYS_DCZID_EL0:             return read_cpuid(SYS_DCZID_EL0);
        default:
                BUG();
                return 0;
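Besides adding the ID_AA64MMFR2_EL1 case, this rewrite passes the full SYS_* token to read_cpuid() and drops the (u64) casts, which tracks a read_cpuid() that now takes the SYS_* encoding and already evaluates to u64. Its assumed shape in this tree (the real definition lives in the arm64 headers and may differ):

    #define read_cpuid(reg) ({                                          \
            u64 __val;                                                  \
            asm("mrs_s      %0, " __stringify(reg) : "=r" (__val));     \
            __val;                                                      \
    })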
@@ -884,7 +974,7 @@ void verify_local_cpu_capabilities(void)
                return;
 
        caps = arm64_features;
-       for (i = 0; caps[i].desc; i++) {
+       for (i = 0; caps[i].matches; i++) {
                if (!cpus_have_cap(caps[i].capability) || !caps[i].sys_reg)
                        continue;
                /*
@@ -897,7 +987,7 @@ void verify_local_cpu_capabilities(void)
                        caps[i].enable(NULL);
        }
 
-       for (i = 0, caps = arm64_hwcaps; caps[i].desc; i++) {
+       for (i = 0, caps = arm64_hwcaps; caps[i].matches; i++) {
                if (!cpus_have_hwcap(&caps[i]))
                        continue;
                if (!feature_matches(__raw_read_system_reg(caps[i].sys_reg), &caps[i]))
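A CPU brought online after the system-wide capability state is finalised cannot add or revoke capabilities, only be checked against them. The elided middle of this loop (hedged, reconstructed from the mainline version of this function) parks the CPU if it lacks a feature the system already advertises:

    if (!feature_matches(__raw_read_system_reg(caps[i].sys_reg), &caps[i]))
            fail_incapable_cpu("arm64_features", &caps[i]);

and enable(NULL) is called directly, with no stop_machine() round, because this code already executes on the CPU being verified.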
@@ -913,7 +1003,7 @@ static inline void set_sys_caps_initialised(void)
 
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static void setup_feature_capabilities(void)
+static void __init setup_feature_capabilities(void)
 {
        update_cpu_capabilities(arm64_features, "detected feature:");
        enable_cpu_capabilities(arm64_features);
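The __init annotations added throughout this patch (cap_set_hwcap(), setup_cpu_hwcaps(), enable_cpu_capabilities(), setup_feature_capabilities()) move these functions into .init.text, which the kernel frees once boot completes. That is safe because they are reachable only from setup_cpu_features(), itself __init; nothing may call them after the init sections are discarded.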
@@ -943,3 +1033,9 @@ void __init setup_cpu_features(void)
                pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
                        L1_CACHE_BYTES, cls);
 }
+
+static bool __maybe_unused
+cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry)
+{
+       return (cpus_have_cap(ARM64_HAS_PAN) && !cpus_have_cap(ARM64_HAS_UAO));
+}
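ARM64_ALT_PAN_NOT_UAO exists purely as glue for the alternatives patcher: on a CPU with PAN but without UAO, the uaccess routines must clear PSTATE.PAN around user accesses, whereas with UAO the unprivileged ldtr/sttr forms already behave correctly and PAN can stay set. A hedged example of the pattern, simplified from the arm64 uaccess code of this era (exact call sites in this tree may differ):

    asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,
                    CONFIG_ARM64_PAN));

Being internal glue rather than a user-visible feature, the capability carries no .desc and never shows up in the boot log.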