OSDN Git Service

c18eb7d41274b296f7709c242f670c1f0986db57
[tomoyo/tomoyo-test1.git] / arch / arm64 / kernel / proton-pack.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Handle detection, reporting and mitigation of Spectre v1, v2 and v4, as
4  * detailed at:
5  *
6  *   https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability
7  *
8  * This code was originally written hastily under an awful lot of stress and so
9  * aspects of it are somewhat hacky. Unfortunately, changing anything in here
10  * instantly makes me feel ill. Thanks, Jann. Thann.
11  *
12  * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
13  * Copyright (C) 2020 Google LLC
14  *
15  * "If there's something strange in your neighbourhood, who you gonna call?"
16  *
17  * Authors: Will Deacon <will@kernel.org> and Marc Zyngier <maz@kernel.org>
18  */
19
20 #include <linux/arm-smccc.h>
21 #include <linux/cpu.h>
22 #include <linux/device.h>
23 #include <linux/nospec.h>
24 #include <linux/prctl.h>
25 #include <linux/sched/task_stack.h>
26
27 #include <asm/spectre.h>
28 #include <asm/traps.h>
29
/*
 * We try to ensure that the mitigation state can never change as the result of
 * onlining a late CPU.
 *
 * The state is only ever ratcheted towards "more mitigated" (higher enum
 * value); attempts to lower it are silently ignored.
 */
static void update_mitigation_state(enum mitigation_state *oldp,
				    enum mitigation_state new)
{
	enum mitigation_state state;

	do {
		state = READ_ONCE(*oldp);
		if (new <= state)
			break;

		/* Userspace almost certainly can't deal with this. */
		if (WARN_ON(system_capabilities_finalized()))
			break;
		/* Retry if another CPU raced us and changed the state. */
	} while (cmpxchg_relaxed(oldp, state, new) != state);
}
49
50 /*
51  * Spectre v1.
52  *
53  * The kernel can't protect userspace for this one: it's each person for
54  * themselves. Advertise what we're doing and be done with it.
55  */
/* sysfs show handler reporting the (fixed) Spectre-v1 mitigation string. */
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}
61
62 /*
63  * Spectre v2.
64  *
65  * This one sucks. A CPU is either:
66  *
67  * - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2.
68  * - Mitigated in hardware and listed in our "safe list".
69  * - Mitigated in software by firmware.
70  * - Mitigated in software by a CPU-specific dance in the kernel and a
71  *   firmware call at EL2.
72  * - Vulnerable.
73  *
74  * It's not unlikely for different CPUs in a big.LITTLE system to fall into
75  * different camps.
76  */
/* System-wide Spectre-v2 mitigation state, only ever ratcheted upwards. */
static enum mitigation_state spectre_v2_state;

/* Set when "nospectre_v2" appears on the kernel command line. */
static bool __read_mostly __nospectre_v2;
static int __init parse_spectre_v2_param(char *str)
{
	/* The parameter takes no value; its mere presence disables the mitigation. */
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_spectre_v2_param);
86
87 static bool spectre_v2_mitigations_off(void)
88 {
89         bool ret = __nospectre_v2 || cpu_mitigations_off();
90
91         if (ret)
92                 pr_info_once("spectre-v2 mitigation disabled by command line option\n");
93
94         return ret;
95 }
96
97 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
98                             char *buf)
99 {
100         switch (spectre_v2_state) {
101         case SPECTRE_UNAFFECTED:
102                 return sprintf(buf, "Not affected\n");
103         case SPECTRE_MITIGATED:
104                 return sprintf(buf, "Mitigation: Branch predictor hardening\n");
105         case SPECTRE_VULNERABLE:
106                 fallthrough;
107         default:
108                 return sprintf(buf, "Vulnerable\n");
109         }
110 }
111
/*
 * Determine, for the local CPU only, whether the hardware itself is already
 * immune to Spectre-v2 (CSV2 advertised, or a known-safe core).
 */
static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
{
	u64 pfr0;
	/* Cores that don't advertise CSV2 but are nevertheless unaffected. */
	static const struct midr_range spectre_v2_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ }
	};

	/* If the CPU has CSV2 set, we're safe */
	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return SPECTRE_UNAFFECTED;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return SPECTRE_UNAFFECTED;

	return SPECTRE_VULNERABLE;
}
137
138 static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
139 {
140         int ret;
141         struct arm_smccc_res res;
142
143         arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
144                              ARM_SMCCC_ARCH_WORKAROUND_1, &res);
145
146         ret = res.a0;
147         switch (ret) {
148         case SMCCC_RET_SUCCESS:
149                 return SPECTRE_MITIGATED;
150         case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
151                 return SPECTRE_UNAFFECTED;
152         default:
153                 fallthrough;
154         case SMCCC_RET_NOT_SUPPORTED:
155                 return SPECTRE_VULNERABLE;
156         }
157 }
158
159 bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
160 {
161         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
162
163         if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED)
164                 return false;
165
166         if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED)
167                 return false;
168
169         return true;
170 }
171
172 DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
173
/* Return the system-wide Spectre-v2 mitigation state. */
enum mitigation_state arm64_get_spectre_v2_state(void)
{
	return spectre_v2_state;
}
178
179 #ifdef CONFIG_KVM
180 #include <asm/cacheflush.h>
181 #include <asm/kvm_asm.h>
182
183 atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
184
/*
 * Copy the hardening sequence [hyp_vecs_start, hyp_vecs_end) into every
 * 0x80-byte vector entry of EL2 vector slot @slot, then sync the I-side.
 */
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	/* Write through the linear alias; each slot is SZ_2K bytes. */
	void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
	int i;

	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	/* Make the newly written vectors visible to instruction fetch. */
	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}
196
/*
 * Record @fn as this CPU's branch-predictor hardening callback and, when EL2
 * is available, ensure a hyp vector slot containing the SMCCC workaround
 * sequence exists (reusing a slot if another CPU already installed @fn).
 */
static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
	static DEFINE_RAW_SPINLOCK(bp_lock);
	int cpu, slot = -1;
	const char *hyp_vecs_start = __smccc_workaround_1_smc;
	const char *hyp_vecs_end = __smccc_workaround_1_smc +
				   __SMCCC_WORKAROUND_1_SMC_SZ;

	/*
	 * Vinz Clortho takes the hyp_vecs start/end "keys" at
	 * the door when we're a guest. Skip the hyp-vectors work.
	 */
	if (!is_hyp_mode_available()) {
		__this_cpu_write(bp_hardening_data.fn, fn);
		return;
	}

	/* Serialise slot lookup/allocation against other CPUs. */
	raw_spin_lock(&bp_lock);
	for (cpu = 0; 0; ) ; /* placeholder removed */
	raw_spin_unlock(&bp_lock);
}
232 #else
/* Without KVM there are no hyp vectors to patch; just record the callback. */
static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
237 #endif  /* CONFIG_KVM */
238
/* Invoke the firmware Spectre-v2 workaround via the SMC conduit. */
static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
243
/* Invoke the firmware Spectre-v2 workaround via the HVC conduit. */
static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
248
/*
 * Qualcomm Falkor link-stack sanitisation: issue 16 nested branch-and-links
 * to overwrite the return-address predictor state, then restore the saved
 * link register.
 */
static void qcom_link_stack_sanitisation(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}
260
261 static bp_hardening_cb_t spectre_v2_get_sw_mitigation_cb(void)
262 {
263         u32 midr = read_cpuid_id();
264         if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
265             ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
266                 return NULL;
267
268         return qcom_link_stack_sanitisation;
269 }
270
/*
 * Try to enable the firmware-assisted Spectre-v2 mitigation on this CPU and
 * return the resulting local mitigation state.
 */
static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
{
	bp_hardening_cb_t cb;
	enum mitigation_state state;

	/* Nothing to enable if firmware can't help (or isn't needed). */
	state = spectre_v2_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	/* Honour "nospectre_v2"/"mitigations=off" even when firmware could help. */
	if (spectre_v2_mitigations_off())
		return SPECTRE_VULNERABLE;

	/* Pick the callback matching the firmware calling convention. */
	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		cb = call_hvc_arch_workaround_1;
		break;

	case SMCCC_CONDUIT_SMC:
		cb = call_smc_arch_workaround_1;
		break;

	default:
		return SPECTRE_VULNERABLE;
	}

	/*
	 * Prefer a CPU-specific workaround if it exists. Note that we
	 * still rely on firmware for the mitigation at EL2.
	 */
	cb = spectre_v2_get_sw_mitigation_cb() ?: cb;
	install_bp_hardening_cb(cb);
	return SPECTRE_MITIGATED;
}
304
/*
 * cpufeature enable hook: pick the best available Spectre-v2 mitigation for
 * the local CPU (hardware first, then firmware) and fold the result into the
 * system-wide state.
 */
void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v2_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v2_enable_fw_mitigation();

	update_mitigation_state(&spectre_v2_state, state);
}
317
318 /*
319  * Spectre v4.
320  *
321  * If you thought Spectre v2 was nasty, wait until you see this mess. A CPU is
322  * either:
323  *
324  * - Mitigated in hardware and listed in our "safe list".
325  * - Mitigated in hardware via PSTATE.SSBS.
326  * - Mitigated in software by firmware (sometimes referred to as SSBD).
327  *
328  * Wait, that doesn't sound so bad, does it? Keep reading...
329  *
330  * A major source of headaches is that the software mitigation is enabled both
331  * on a per-task basis, but can also be forced on for the kernel, necessitating
332  * both context-switch *and* entry/exit hooks. To make it even worse, some CPUs
333  * allow EL0 to toggle SSBS directly, which can end up with the prctl() state
334  * being stale when re-entering the kernel. The usual big.LITTLE caveats apply,
335  * so you can have systems that have both firmware and SSBS mitigations. This
336  * means we actually have to reject late onlining of CPUs with mitigations if
337  * all of the currently onlined CPUs are safelisted, as the mitigation tends to
338  * be opt-in for userspace. Yes, really, the cure is worse than the disease.
339  *
340  * The only good part is that if the firmware mitigation is present, then it is
341  * present for all CPUs, meaning we don't have to worry about late onlining of a
342  * vulnerable CPU if one of the boot CPUs is using the firmware mitigation.
343  *
344  * Give me a VAX-11/780 any day of the week...
345  */
/* System-wide Spectre-v4 mitigation state, only ever ratcheted upwards. */
static enum mitigation_state spectre_v4_state;

/* This is the per-cpu state tracking whether we need to talk to firmware */
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

/* Mitigation policies selectable on the command line. */
enum spectre_v4_policy {
	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC,
	SPECTRE_V4_POLICY_MITIGATION_ENABLED,
	SPECTRE_V4_POLICY_MITIGATION_DISABLED,
};

/* Chosen via the "ssbd" early parameter; zero-init means DYNAMIC by default. */
static enum spectre_v4_policy __read_mostly __spectre_v4_policy;
358
/* Mapping of "ssbd=" argument strings to mitigation policies. */
static const struct spectre_v4_param {
	const char		*str;
	enum spectre_v4_policy	policy;
} spectre_v4_params[] = {
	{ "force-on",	SPECTRE_V4_POLICY_MITIGATION_ENABLED, },
	{ "force-off",	SPECTRE_V4_POLICY_MITIGATION_DISABLED, },
	{ "kernel",	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, },
};
367 static int __init parse_spectre_v4_param(char *str)
368 {
369         int i;
370
371         if (!str || !str[0])
372                 return -EINVAL;
373
374         for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) {
375                 const struct spectre_v4_param *param = &spectre_v4_params[i];
376
377                 if (strncmp(str, param->str, strlen(param->str)))
378                         continue;
379
380                 __spectre_v4_policy = param->policy;
381                 return 0;
382         }
383
384         return -EINVAL;
385 }
386 early_param("ssbd", parse_spectre_v4_param);
387
388 /*
389  * Because this was all written in a rush by people working in different silos,
390  * we've ended up with multiple command line options to control the same thing.
391  * Wrap these up in some helpers, which prefer disabling the mitigation if faced
392  * with contradictory parameters. The mitigation is always either "off",
393  * "dynamic" or "on".
394  */
395 static bool spectre_v4_mitigations_off(void)
396 {
397         bool ret = cpu_mitigations_off() ||
398                    __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED;
399
400         if (ret)
401                 pr_info_once("spectre-v4 mitigation disabled by command-line option\n");
402
403         return ret;
404 }
405
406 /* Do we need to toggle the mitigation state on entry to/exit from the kernel? */
407 static bool spectre_v4_mitigations_dynamic(void)
408 {
409         return !spectre_v4_mitigations_off() &&
410                __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
411 }
412
413 static bool spectre_v4_mitigations_on(void)
414 {
415         return !spectre_v4_mitigations_off() &&
416                __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
417 }
418
419 ssize_t cpu_show_spec_store_bypass(struct device *dev,
420                                    struct device_attribute *attr, char *buf)
421 {
422         switch (spectre_v4_state) {
423         case SPECTRE_UNAFFECTED:
424                 return sprintf(buf, "Not affected\n");
425         case SPECTRE_MITIGATED:
426                 return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
427         case SPECTRE_VULNERABLE:
428                 fallthrough;
429         default:
430                 return sprintf(buf, "Vulnerable\n");
431         }
432 }
433
/* Return the system-wide Spectre-v4 mitigation state. */
enum mitigation_state arm64_get_spectre_v4_state(void)
{
	return spectre_v4_state;
}
438
/*
 * Determine, for the local CPU only, whether hardware protects us against
 * Spectre-v4: either the core is on the safelist, or it implements SSBS.
 */
static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
{
	/* Cores known to be unaffected by Spectre-v4. */
	static const struct midr_range spectre_v4_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ },
	};

	if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
		return SPECTRE_UNAFFECTED;

	/* CPU features are detected first */
	if (this_cpu_has_cap(ARM64_SSBS))
		return SPECTRE_MITIGATED;

	return SPECTRE_VULNERABLE;
}
460
461 static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
462 {
463         int ret;
464         struct arm_smccc_res res;
465
466         arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
467                              ARM_SMCCC_ARCH_WORKAROUND_2, &res);
468
469         ret = res.a0;
470         switch (ret) {
471         case SMCCC_RET_SUCCESS:
472                 return SPECTRE_MITIGATED;
473         case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
474                 fallthrough;
475         case SMCCC_RET_NOT_REQUIRED:
476                 return SPECTRE_UNAFFECTED;
477         default:
478                 fallthrough;
479         case SMCCC_RET_NOT_SUPPORTED:
480                 return SPECTRE_VULNERABLE;
481         }
482 }
483
/*
 * cpufeature matcher: does the local CPU need a Spectre-v4 workaround?
 * A MITIGATED answer still counts, since enabling work remains to be done.
 */
bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
{
	enum mitigation_state state;

	/* Must run on the CPU being probed, with preemption disabled. */
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_get_cpu_fw_mitigation_state();

	return state != SPECTRE_UNAFFECTED;
}
496
/*
 * Emulate the "MSR SSBS, #imm" instruction for EL1 by mirroring the requested
 * value into the saved PSTATE, so it takes effect on exception return.
 */
static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
{
	/* NOTE(review): non-zero presumably means "not handled" — EL0 traps
	 * are left to the normal UNDEF path; confirm against traps.c. */
	if (user_mode(regs))
		return 1;

	/* The Imm bit of the encoding carries the value to write into SSBS. */
	if (instr & BIT(PSTATE_Imm_shift))
		regs->pstate |= PSR_SSBS_BIT;
	else
		regs->pstate &= ~PSR_SSBS_BIT;

	/* Step over the emulated 4-byte instruction. */
	arm64_skip_faulting_instruction(regs, 4);
	return 0;
}
510
/*
 * Match the MSR-immediate encoding that targets PSTATE.SSBS, with the
 * immediate value bit masked out so both "set" and "clear" forms hit the
 * handler.
 */
static struct undef_hook ssbs_emulation_hook = {
	.instr_mask	= ~(1U << PSTATE_Imm_shift),
	.instr_val	= 0xd500401f | PSTATE_SSBS,
	.fn		= ssbs_emulation_handler,
};
516
/*
 * Enable the PSTATE.SSBS-based Spectre-v4 mitigation on this CPU when it is
 * available, registering the MSR-immediate emulation hook exactly once.
 */
static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
{
	static bool undef_hook_registered = false;
	static DEFINE_RAW_SPINLOCK(hook_lock);
	enum mitigation_state state;

	/*
	 * If the system is mitigated but this CPU doesn't have SSBS, then
	 * we must be on the safelist and there's nothing more to do.
	 */
	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
		return state;

	/* Serialise registration so the hook is installed system-wide once. */
	raw_spin_lock(&hook_lock);
	if (!undef_hook_registered) {
		register_undef_hook(&ssbs_emulation_hook);
		undef_hook_registered = true;
	}
	raw_spin_unlock(&hook_lock);

	if (spectre_v4_mitigations_off()) {
		/* Mitigation disabled: allow speculative store bypass. */
		sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
		asm volatile(SET_PSTATE_SSBS(1));
		return SPECTRE_VULNERABLE;
	}

	/* SCTLR_EL1.DSSBS was initialised to 0 during boot */
	asm volatile(SET_PSTATE_SSBS(0));
	return SPECTRE_MITIGATED;
}
548
549 /*
550  * Patch a branch over the Spectre-v4 mitigation code with a NOP so that
551  * we fallthrough and check whether firmware needs to be called on this CPU.
552  */
void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
						  __le32 *origptr,
						  __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1); /* Branch -> NOP */

	/* Mitigation forced off: keep the branch, skipping the firmware call. */
	if (spectre_v4_mitigations_off())
		return;

	/* All CPUs have SSBS: the firmware path is never needed. */
	if (cpus_have_final_cap(ARM64_SSBS))
		return;

	/* Dynamic policy: NOP the branch so the firmware check is reached. */
	if (spectre_v4_mitigations_dynamic())
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}
568
569 /*
570  * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
571  * to call into firmware to adjust the mitigation state.
572  */
void __init spectre_v4_patch_fw_mitigation_conduit(struct alt_instr *alt,
						   __le32 *origptr,
						   __le32 *updptr, int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */

	/* Pick the instruction matching the firmware calling convention. */
	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case SMCCC_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		/* No conduit available: leave the NOP in place. */
		return;
	}

	*updptr = cpu_to_le32(insn);
}
594
/*
 * Try to enable the firmware-backed (SSBD) Spectre-v4 mitigation on this CPU
 * and return the resulting local mitigation state.
 */
static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
{
	enum mitigation_state state;

	state = spectre_v4_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v4_mitigations_off()) {
		/* Explicitly ask firmware to leave store-bypass enabled. */
		arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
		return SPECTRE_VULNERABLE;
	}

	/* Turn the workaround on for now... */
	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);

	/* ...and under the dynamic policy, arm the entry/exit firmware calls. */
	if (spectre_v4_mitigations_dynamic())
		__this_cpu_write(arm64_ssbd_callback_required, 1);

	return SPECTRE_MITIGATED;
}
615
/*
 * cpufeature enable hook: pick the best available Spectre-v4 mitigation for
 * the local CPU (SSBS hardware first, then firmware) and fold the result into
 * the system-wide state.
 */
void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v4_enable_hw_mitigation();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_enable_fw_mitigation();

	update_mitigation_state(&spectre_v4_state, state);
}
628
/*
 * Set (@state true) or clear the SSBS bit in @regs' saved PSTATE, using the
 * AArch32 or AArch64 bit position as appropriate for the task's mode.
 */
static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
{
	u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;

	if (state)
		regs->pstate |= bit;
	else
		regs->pstate &= ~bit;
}
638
/*
 * Recompute the saved PSTATE.SSBS value for @tsk: SSBS set means speculation
 * is permitted. Kernel threads never get per-task relaxation under the
 * dynamic policy.
 */
void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
{
	struct pt_regs *regs = task_pt_regs(tsk);
	bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;

	if (spectre_v4_mitigations_off())
		ssbs = true;
	else if (spectre_v4_mitigations_dynamic() && !kthread)
		ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);

	__update_pstate_ssbs(regs, ssbs);
}
651
652 /*
653  * The Spectre-v4 mitigation can be controlled via a prctl() from userspace.
654  * This is interesting because the "speculation disabled" behaviour can be
655  * configured so that it is preserved across exec(), which means that the
656  * prctl() may be necessary even when PSTATE.SSBS can be toggled directly
657  * from userspace.
658  */
/* prctl helper: turn the SSBD mitigation on for @task (clears noexec state). */
static void ssbd_prctl_enable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_set_spec_ssb_disable(task);
	set_tsk_thread_flag(task, TIF_SSBD);
}
665
/* prctl helper: turn the SSBD mitigation off for @task (clears noexec state). */
static void ssbd_prctl_disable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_clear_spec_ssb_disable(task);
	clear_tsk_thread_flag(task, TIF_SSBD);
}
672
/*
 * PR_SET_SPECULATION_CTRL handler for PR_SPEC_STORE_BYPASS. Note the
 * inversion: "enabling speculation" means "disabling the mitigation" and
 * vice versa. The global policy can veto per-task requests.
 */
static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* Enable speculation: disable mitigation */
		/*
		 * Force disabled speculation prevents it from being
		 * re-enabled.
		 */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;

		/*
		 * If the mitigation is forced on, then speculation is forced
		 * off and we again prevent it from being re-enabled.
		 */
		if (spectre_v4_mitigations_on())
			return -EPERM;

		ssbd_prctl_disable_mitigation(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		/* Force disable speculation: force enable mitigation */
		/*
		 * If the mitigation is forced off, then speculation is forced
		 * on and we prevent it from being disabled.
		 */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		task_set_spec_ssb_force_disable(task);
		fallthrough;
	case PR_SPEC_DISABLE:
		/* Disable speculation: enable mitigation */
		/* Same as PR_SPEC_FORCE_DISABLE */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		ssbd_prctl_enable_mitigation(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		/* Disable speculation until execve(): enable mitigation */
		/*
		 * If the mitigation state is forced one way or the other, then
		 * we must fail now before we try to toggle it on execve().
		 */
		if (task_spec_ssb_force_disable(task) ||
		    spectre_v4_mitigations_off() ||
		    spectre_v4_mitigations_on()) {
			return -EPERM;
		}

		ssbd_prctl_enable_mitigation(task);
		task_set_spec_ssb_noexec(task);
		break;
	default:
		return -ERANGE;
	}

	/* Propagate the new choice into the task's saved PSTATE.SSBS. */
	spectre_v4_enable_task_mitigation(task);
	return 0;
}
735
736 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
737                              unsigned long ctrl)
738 {
739         switch (which) {
740         case PR_SPEC_STORE_BYPASS:
741                 return ssbd_prctl_set(task, ctrl);
742         default:
743                 return -ENODEV;
744         }
745 }
746
/*
 * PR_GET_SPECULATION_CTRL handler for PR_SPEC_STORE_BYPASS: report the
 * system-wide state, refined by the per-task prctl flags when the policy is
 * dynamic.
 */
static int ssbd_prctl_get(struct task_struct *task)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return PR_SPEC_NOT_AFFECTED;
	case SPECTRE_MITIGATED:
		/* Policy forces the mitigation on for everyone. */
		if (spectre_v4_mitigations_on())
			return PR_SPEC_NOT_AFFECTED;

		/* Dynamic policy: fall out to the per-task checks below. */
		if (spectre_v4_mitigations_dynamic())
			break;

		/* Mitigations are disabled, so we're vulnerable. */
		fallthrough;
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return PR_SPEC_ENABLE;
	}

	/* Check the mitigation state for this task */
	if (task_spec_ssb_force_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;

	if (task_spec_ssb_noexec(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;

	if (task_spec_ssb_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;

	return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
}
779
780 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
781 {
782         switch (which) {
783         case PR_SPEC_STORE_BYPASS:
784                 return ssbd_prctl_get(task);
785         default:
786                 return -ENODEV;
787         }
788 }