/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/types.h>

#include <asm/cputype.h>
#include <asm/cpufeature.h>

static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return MIDR_IS_CPU_MODEL_RANGE(read_cpuid_id(), entry->midr_model,
				       entry->midr_range_min,
				       entry->midr_range_max);
}

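/*
 * A CPU is affected when the strict fields of its CTR_EL0 (cache type
 * register) differ from the system-wide sanitised value computed at
 * boot, i.e. when a late CPU reports a different cache geometry than
 * the CPUs the kernel booted on.
 */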
static bool
has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
		(arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
}

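/*
 * Clearing SCTLR_EL1.UCT makes EL0 reads of CTR_EL0 trap to EL1, so the
 * trap handler (elsewhere in the kernel) can present userspace with the
 * sanitised system-wide value rather than this CPU's raw register.
 */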
static int cpu_enable_trap_ctr_access(void *__unused)
{
	/* Clear SCTLR_EL1.UCT */
	config_sctlr_el1(SCTLR_EL1_UCT, 0);
	return 0;
}

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];
extern char __smccc_workaround_1_hvc_start[];
extern char __smccc_workaround_1_hvc_end[];

static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = __bp_harden_hyp_vecs_start + slot * SZ_2K;
	int i;

	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

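/*
 * A slot is SZ_2K bytes: 16 vector entries of 0x80 bytes each, all
 * populated with the same workaround sequence by __copy_hyp_vect_bpi().
 * Slots are allocated once per distinct callback and shared across all
 * CPUs that use that callback.
 */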
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	static int last_slot = -1;
	static DEFINE_SPINLOCK(bp_lock);
	int cpu, slot = -1;

	spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		last_slot++;
		BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
			/ SZ_2K) <= last_slot);
		slot = last_slot;
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_1_smc_start		NULL
#define __smccc_workaround_1_smc_end		NULL
#define __smccc_workaround_1_hvc_start		NULL
#define __smccc_workaround_1_hvc_end		NULL

static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM */

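/*
 * CPUs that set ID_AA64PFR0_EL1.CSV2 advertise that branch predictions
 * cannot be influenced across contexts, so no hardening callback is
 * needed (or installed) for them.
 */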
static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
				    bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	u64 pfr0;

	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
		return;

	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return;

	__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
}

#include <uapi/linux/psci.h>
#include <linux/arm-smccc.h>
#include <linux/psci.h>

static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

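/*
 * Probe the firmware for ARM_SMCCC_ARCH_WORKAROUND_1 through the
 * ARCH_FEATURES query, using whichever conduit (HVC or SMC) PSCI was
 * probed with. SMCCC v1.0 has no discovery call, so it is treated as
 * "workaround unavailable".
 */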
static int enable_smccc_arch_workaround_1(void *data)
{
	const struct arm64_cpu_capabilities *entry = data;
	bp_hardening_cb_t cb;
	void *smccc_start, *smccc_end;
	struct arm_smccc_res res;

	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
		return 0;

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		return 0;

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		if ((int)res.a0 < 0)
			return 0;
		cb = call_hvc_arch_workaround_1;
		smccc_start = __smccc_workaround_1_hvc_start;
		smccc_end = __smccc_workaround_1_hvc_end;
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		if ((int)res.a0 < 0)
			return 0;
		cb = call_smc_arch_workaround_1;
		smccc_start = __smccc_workaround_1_smc_start;
		smccc_end = __smccc_workaround_1_smc_end;
		break;

	default:
		return 0;
	}

	install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);

	return 0;
}
#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */

#ifdef CONFIG_ARM64_SSBD
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;

static const struct ssbd_options {
	const char	*str;
	int		state;
} ssbd_options[] = {
	{ "force-on",	ARM64_SSBD_FORCE_ENABLE, },
	{ "force-off",	ARM64_SSBD_FORCE_DISABLE, },
	{ "kernel",	ARM64_SSBD_KERNEL, },
};

static int __init ssbd_cfg(char *buf)
{
	int i;

	if (!buf || !buf[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
		int len = strlen(ssbd_options[i].str);

		if (strncmp(buf, ssbd_options[i].str, len))
			continue;

		ssbd_state = ssbd_options[i].state;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", ssbd_cfg);
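
/*
 * Example usage (kernel command line); the accepted strings mirror the
 * ssbd_options[] table above. Roughly:
 *
 *	ssbd=force-on	unconditionally enable the mitigation
 *	ssbd=force-off	unconditionally disable it
 *	ssbd=kernel	enable it while running in the kernel (default)
 */
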
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr,
				       int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1);

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case PSCI_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}
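
/*
 * This is an alternatives callback: the call-site carries a placeholder
 * instruction which is rewritten at patch time to an HVC or SMC
 * depending on the PSCI conduit. As a sketch (the real call-site lives
 * in the entry assembly, not in this file), a user looks like:
 *
 *	alternative_cb	arm64_update_smccc_conduit
 *	nop				// patched to HVC/SMC #0
 *	alternative_cb_end
 */
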
static void arm64_set_ssbd_mitigation(bool state)
{
	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;
	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
}

static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	struct arm_smccc_res res;
	bool required = true;
	s32 val;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
		ssbd_state = ARM64_SSBD_UNKNOWN;
		return false;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;
	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;
	default:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		return false;
	}

	val = (s32)res.a0;

	switch (val) {
	case SMCCC_RET_NOT_SUPPORTED:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		return false;
	case SMCCC_RET_NOT_REQUIRED:
		pr_info_once("%s mitigation not required\n", entry->desc);
		ssbd_state = ARM64_SSBD_MITIGATED;
		return false;
	case SMCCC_RET_SUCCESS:
		required = true;
		break;
	case 1:	/* Mitigation not required on this CPU */
		required = false;
		break;
	default:
		WARN_ON(1);
		return false;
	}

	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		pr_info_once("%s disabled from command-line\n", entry->desc);
		arm64_set_ssbd_mitigation(false);
		required = false;
		break;
	case ARM64_SSBD_KERNEL:
		if (required) {
			__this_cpu_write(arm64_ssbd_callback_required, 1);
			arm64_set_ssbd_mitigation(true);
		}
		break;
	case ARM64_SSBD_FORCE_ENABLE:
		pr_info_once("%s forced from command-line\n", entry->desc);
		arm64_set_ssbd_mitigation(true);
		required = true;
		break;
	default:
		WARN_ON(1);
		break;
	}

	return required;
}
#endif	/* CONFIG_ARM64_SSBD */

#define MIDR_RANGE(model, min, max) \
	.def_scope = SCOPE_LOCAL_CPU, \
	.matches = is_affected_midr_range, \
	.midr_model = model, \
	.midr_range_min = min, \
	.midr_range_max = max

#define MIDR_ALL_VERSIONS(model) \
	.def_scope = SCOPE_LOCAL_CPU, \
	.matches = is_affected_midr_range, \
	.midr_model = model, \
	.midr_range_min = 0, \
	.midr_range_max = (MIDR_VARIANT_MASK | MIDR_REVISION_MASK)
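
/*
 * For example, the erratum 845719 entry below uses
 * MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04), matching Cortex-A53 parts
 * from r0p0 to r0p4 inclusive (MIDR_EL1 variant in bits [23:20],
 * revision in bits [3:0]).
 */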
const struct arm64_cpu_capabilities arm64_errata[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	{
	/* Cortex-A53 r0p[012] */
		.desc = "ARM errata 826319, 827319, 824069",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
		.enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
	{
	/* Cortex-A53 r0p[01] */
		.desc = "ARM errata 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01),
		.enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
			   (1 << MIDR_VARIANT_SHIFT) | 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
			   (1 << MIDR_VARIANT_SHIFT) | 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
	/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
	/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		MIDR_RANGE(MIDR_THUNDERX, 0x00,
			   (1 << MIDR_VARIANT_SHIFT) | 1),
	},
	{
	/* Cavium ThunderX, T81 pass 1.0 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
	},
#endif
	{
		.desc = "Mismatched cache line size",
		.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
		.matches = has_mismatched_cache_line_size,
		.def_scope = SCOPE_LOCAL_CPU,
		.enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
		.enable = enable_smccc_arch_workaround_1,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
		.enable = enable_smccc_arch_workaround_1,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
		.enable = enable_smccc_arch_workaround_1,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
		.enable = enable_smccc_arch_workaround_1,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
		.enable = enable_smccc_arch_workaround_1,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
		.enable = enable_smccc_arch_workaround_1,
	},
#endif
#ifdef CONFIG_ARM64_SSBD
	{
		.desc = "Speculative Store Bypass Disable",
		.def_scope = SCOPE_LOCAL_CPU,
		.capability = ARM64_SSBD,
		.matches = has_ssbd_mitigation,
	},
#endif
	{
	}
};

/*
 * The CPU errata workarounds are detected and applied at boot time and
 * the related information is freed soon after. If a new CPU requires a
 * workaround that was not detected at boot, fail that CPU.
 */
void verify_local_cpu_errata_workarounds(void)
{
	const struct arm64_cpu_capabilities *caps = arm64_errata;

	for (; caps->matches; caps++) {
		if (cpus_have_cap(caps->capability)) {
			if (caps->enable)
				caps->enable((void *)caps);
		} else if (caps->matches(caps, SCOPE_LOCAL_CPU)) {
			pr_crit("CPU%d: Requires work around for %s, not detected"
					" at boot time\n",
				smp_processor_id(),
				caps->desc ? : "an erratum");
			cpu_die_early();
		}
	}
}
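
/*
 * Boot-time flow: update_cpu_errata_workarounds() (below) detects and
 * records workarounds as CPUs come up, enable_errata_workarounds()
 * applies the ->enable methods once detection is complete, and
 * verify_local_cpu_errata_workarounds() (above) polices CPUs that are
 * brought online after boot.
 */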
void update_cpu_errata_workarounds(void)
{
	update_cpu_capabilities(arm64_errata, "enabling workaround for");
}

void __init enable_errata_workarounds(void)
{
	enable_cpu_capabilities(arm64_errata);
}