Merge android-4.4.189 (74c8219) into msm-4.4
[sagit-ice-cold/kernel_xiaomi_msm8998.git] arch/arm64/kernel/cpufeature.c
index 847c700..cb475b0 100644
@@ -29,6 +29,7 @@
 #include <asm/cpu_ops.h>
 #include <asm/processor.h>
 #include <asm/sysreg.h>
+#include <asm/virt.h>
 
 unsigned long elf_hwcap __read_mostly;
 EXPORT_SYMBOL_GPL(elf_hwcap);
@@ -91,6 +92,7 @@ static struct arm64_ftr_bits ftr_id_aa64isar0[] = {
 static struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0),
+       ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
        ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
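
The new CSV2 entry is worth a note: ID_AA64PFR0_EL1.CSV2 (a 4-bit field at bit 56) advertises hardware that is not subject to Spectre-v2 style branch-target injection. It is tracked FTR_NONSTRICT/FTR_LOWER_SAFE, so the system-wide value is the minimum reported by any CPU and a mismatch does not taint the system. As a minimal user-space sketch of the field extraction these ARM64_FTR_BITS() tables describe (the sample register value is invented):

    #include <stdint.h>
    #include <stdio.h>

    /* Pull a <width>-bit field out of an ID register image, the way
     * the descriptor tables above define fields for the kernel. */
    static uint64_t ftr_field(uint64_t reg, unsigned shift, unsigned width)
    {
            return (reg >> shift) & ((1ULL << width) - 1);
    }

    int main(void)
    {
            uint64_t id_aa64pfr0 = 0x0100000000000011ULL; /* hypothetical */

            /* CSV2 lives at ID_AA64PFR0_CSV2_SHIFT (56), width 4 */
            printf("CSV2 = %llu\n",
                   (unsigned long long)ftr_field(id_aa64pfr0, 56, 4));
            return 0;
    }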
@@ -137,10 +139,12 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
 };
 
 static struct arm64_ftr_bits ftr_ctr[] = {
-       U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1),      /* RAO */
-       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0),
-       U_ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0),        /* CWG */
-       U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), /* ERG */
+       U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1),      /* RES1 */
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 30, 1, 0),
+       U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 29, 1, 1), /* DIC */
+       U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 28, 1, 1), /* IDC */
+       U_ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, 24, 4, 0),        /* CWG */
+       U_ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, 20, 4, 0),        /* ERG */
        U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */
        /*
         * Linux can handle differing I-cache policies. Userspace JITs will
@@ -351,6 +355,10 @@ static s64 arm64_ftr_safe_value(struct arm64_ftr_bits *ftrp, s64 new, s64 cur)
        case FTR_LOWER_SAFE:
                ret = new < cur ? new : cur;
                break;
+       case FTR_HIGHER_OR_ZERO_SAFE:
+               if (!cur || !new)
+                       break;
+               /* Fallthrough */
        case FTR_HIGHER_SAFE:
                ret = new > cur ? new : cur;
                break;
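
The new FTR_HIGHER_OR_ZERO_SAFE type exists because CTR_EL0.CWG and CTR_EL0.ERG may read as 0, meaning "not reported; assume the architectural maximum". Merging 0 with a real reading under plain FTR_HIGHER_SAFE would pick the real value and understate the worst case, so any zero reading forces the merged result to 0. A standalone model of the merge logic (names are illustrative, not the kernel's):

    #include <stdio.h>

    enum ftr_type { LOWER_SAFE, HIGHER_SAFE, HIGHER_OR_ZERO_SAFE };

    /* Mirrors arm64_ftr_safe_value(): combine two CPUs' readings of
     * one ID field into the value that is safe for both. */
    static long safe_value(enum ftr_type type, long new, long cur)
    {
            long ret = 0;

            switch (type) {
            case LOWER_SAFE:
                    ret = new < cur ? new : cur;
                    break;
            case HIGHER_OR_ZERO_SAFE:
                    if (!cur || !new)
                            break;  /* either CPU says "unknown" -> 0 */
                    /* Fallthrough */
            case HIGHER_SAFE:
                    ret = new > cur ? new : cur;
                    break;
            }
            return ret;
    }

    int main(void)
    {
            /* CWG-style field: one CPU reports 4, the other 0 or 3. */
            printf("%ld\n", safe_value(HIGHER_OR_ZERO_SAFE, 0, 4)); /* 0 */
            printf("%ld\n", safe_value(HIGHER_OR_ZERO_SAFE, 3, 4)); /* 4 */
            return 0;
    }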
@@ -650,6 +658,11 @@ static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry)
        return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX, rv_min, rv_max);
 }
 
+static bool runs_at_el2(const struct arm64_cpu_capabilities *entry)
+{
+       return is_kernel_in_hyp_mode();
+}
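
runs_at_el2() just wraps is_kernel_in_hyp_mode() from the newly included <asm/virt.h>, which is true when the kernel was entered at EL2 and stayed there (the VHE configuration). For illustration, such a check reduces to reading CurrentEL; this sketch is not the kernel's exact implementation and is only valid when executed at EL1 or EL2, since CurrentEL is not readable from user space:

    /* Sketch: mainline's is_kernel_in_hyp_mode() compares CurrentEL
     * against CurrentEL_EL2. The EL is encoded in bits [3:2]. */
    static inline int kernel_in_hyp_mode(void)
    {
            unsigned long el;

            asm("mrs %0, CurrentEL" : "=r" (el));
            return el == (2UL << 2);
    }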
+
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
 
@@ -735,6 +748,11 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .matches = cpufeature_pan_not_uao,
        },
 #endif /* CONFIG_ARM64_PAN */
+       {
+               .desc = "Virtualization Host Extensions",
+               .capability = ARM64_HAS_VIRT_HOST_EXTN,
+               .matches = runs_at_el2,
+       },
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
        {
                .capability = ARM64_UNMAP_KERNEL_AT_EL0,
@@ -856,8 +874,7 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
 * Run through the enabled capabilities and call enable() on all active
  * CPUs
  */
-static void __init
-enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
+void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
 {
        int i;
 
@@ -869,7 +886,8 @@ enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
                         * uses an IPI, giving us a PSTATE that disappears when
                         * we return.
                         */
-                       stop_machine(caps[i].enable, NULL, cpu_online_mask);
+                       stop_machine(caps[i].enable, (void *)&caps[i],
+                                                       cpu_online_mask);
 }
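
Two related changes meet here: enable_cpu_capabilities() loses its static linkage so the errata path can reuse it (see the enable_errata_workarounds() call added to setup_cpu_features() below), and each capability's enable() hook now receives a pointer to the capability that matched instead of NULL. That lets one enable() implementation serve several table entries and branch on which one fired. A sketch of the resulting callback shape, using an invented stand-in structure:

    /* stop_machine() runs the hook once per online CPU; its function
     * type is int (*)(void *), and 'data' is now &caps[i]. */
    struct capability_stub {        /* illustrative stand-in */
            const char *desc;
            int capability;
    };

    static int cpu_enable_example(void *data)
    {
            const struct capability_stub *cap = data;

            /* apply the feature/workaround named by cap->capability */
            return cap ? 0 : -1;
    }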
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -984,7 +1002,7 @@ void verify_local_cpu_capabilities(void)
                if (!feature_matches(__raw_read_system_reg(caps[i].sys_reg), &caps[i]))
                        fail_incapable_cpu("arm64_features", &caps[i]);
                if (caps[i].enable)
-                       caps[i].enable(NULL);
+                       caps[i].enable((void *)&caps[i]);
        }
 
        for (i = 0, caps = arm64_hwcaps; caps[i].matches; i++) {
@@ -1016,6 +1034,7 @@ void __init setup_cpu_features(void)
 
        /* Set the CPU feature capabilities */
        setup_feature_capabilities();
+       enable_errata_workarounds();
        setup_cpu_hwcaps();
 
        /* Advertise that we have computed the system capabilities */
@@ -1029,9 +1048,9 @@ void __init setup_cpu_features(void)
        if (!cwg)
                pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
                        cls);
-       if (L1_CACHE_BYTES < cls)
-               pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
-                       L1_CACHE_BYTES, cls);
+       if (ARCH_DMA_MINALIGN < cls)
+               pr_warn("ARCH_DMA_MINALIGN smaller than the Cache Writeback Granule (%d < %d)\n",
+                       ARCH_DMA_MINALIGN, cls);
 }
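
The rewritten warning compares the Cache Writeback Granule against ARCH_DMA_MINALIGN (128 bytes on arm64), the alignment the DMA API actually guarantees for non-coherent devices, rather than L1_CACHE_BYTES. CTR_EL0.CWG encodes the granule as 4 << CWG bytes, with 0 meaning the value is not reported. A small user-space model of the check (the sample CWG reading is invented):

    #include <stdio.h>

    #define ARCH_DMA_MINALIGN 128   /* arm64 compile-time DMA alignment */

    int main(void)
    {
            unsigned cwg = 6;       /* hypothetical CTR_EL0.CWG reading */
            int cls = cwg ? 4 << cwg : ARCH_DMA_MINALIGN;

            if (!cwg)
                    printf("No CWG information, assuming %d-byte lines\n",
                           cls);
            if (ARCH_DMA_MINALIGN < cls)
                    printf("ARCH_DMA_MINALIGN smaller than CWG (%d < %d)\n",
                           ARCH_DMA_MINALIGN, cls);
            return 0;
    }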
 
 static bool __maybe_unused