arch/x86/kernel/cpu/bugs.c (sagit-ice-cold/kernel_xiaomi_msm8998, "Merge 4.4.148 into android-4.4")
/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  Cyrix stuff, June 1998 by:
 *      - Rafael R. Reilova (moved everything from head.S),
 *        <rreilova@ececs.uc.edu>
 *      - Channing Corn (tests & fixes),
 *      - Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/intel-family.h>
#include <asm/e820.h>

static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);

/*
 * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
 * writes to SPEC_CTRL contain whatever reserved bits have been set.
 */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);

/*
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
static u64 x86_spec_ctrl_mask = SPEC_CTRL_IBRS;

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 x86_amd_ls_cfg_base;
u64 x86_amd_ls_cfg_ssbd_mask;

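/*
 * check_bugs() runs once during early boot (from start_kernel()). It reads
 * the boot-time SPEC_CTRL value and selects the Spectre v2, Speculative
 * Store Bypass and L1TF mitigations before alternatives are patched in.
 */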
void __init check_bugs(void)
{
        identify_boot_cpu();

        if (!IS_ENABLED(CONFIG_SMP)) {
                pr_info("CPU: ");
                print_cpu_info(&boot_cpu_data);
        }

        /*
         * Read the SPEC_CTRL MSR to account for reserved bits which may
         * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
         * init code as it is not enumerated and depends on the family.
         */
        if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

        /* Allow STIBP in MSR_SPEC_CTRL if supported */
        if (boot_cpu_has(X86_FEATURE_STIBP))
                x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;

        /* Select the proper spectre mitigation before patching alternatives */
        spectre_v2_select_mitigation();

        /*
         * Select proper mitigation for any exposure to the Speculative Store
         * Bypass vulnerability.
         */
        ssb_select_mitigation();

        l1tf_select_mitigation();

#ifdef CONFIG_X86_32
        /*
         * Check whether we are able to run this kernel safely on SMP.
         *
         * - i386 is no longer supported.
         * - In order to run on anything without a TSC, we need to be
         *   compiled for an i486.
         */
        if (boot_cpu_data.x86 < 4)
                panic("Kernel requires i486+ for 'invlpg' and other features");

        init_utsname()->machine[1] =
                '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
        alternative_instructions();

        fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
        alternative_instructions();

        /*
         * Make sure the first 2MB area is not mapped by huge pages.
         * There are typically fixed size MTRRs in there and overlapping
         * MTRRs into large pages causes slowdowns.
         *
         * Right now we don't do that with gbpages because there seems to be
         * very little benefit for that case.
         */
        if (!direct_gbpages)
                set_memory_4k((unsigned long)__va(0), 1);
#endif
}

/* The kernel command line selection */
enum spectre_v2_mitigation_cmd {
        SPECTRE_V2_CMD_NONE,
        SPECTRE_V2_CMD_AUTO,
        SPECTRE_V2_CMD_FORCE,
        SPECTRE_V2_CMD_RETPOLINE,
        SPECTRE_V2_CMD_RETPOLINE_GENERIC,
        SPECTRE_V2_CMD_RETPOLINE_AMD,
};

static const char *spectre_v2_strings[] = {
        [SPECTRE_V2_NONE]                       = "Vulnerable",
        [SPECTRE_V2_RETPOLINE_MINIMAL]          = "Vulnerable: Minimal generic ASM retpoline",
        [SPECTRE_V2_RETPOLINE_MINIMAL_AMD]      = "Vulnerable: Minimal AMD ASM retpoline",
        [SPECTRE_V2_RETPOLINE_GENERIC]          = "Mitigation: Full generic retpoline",
        [SPECTRE_V2_RETPOLINE_AMD]              = "Mitigation: Full AMD retpoline",
};

#undef pr_fmt
#define pr_fmt(fmt)     "Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;

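/*
 * x86_virt_spec_ctrl - set speculation control MSRs around guest entry/exit
 * @guest_spec_ctrl:      the guest's view of MSR_SPEC_CTRL
 * @guest_virt_spec_ctrl: the guest's view of MSR_VIRT_SPEC_CTRL (SSBD only)
 * @setguest:             true when switching to the guest values, false when
 *                        restoring the host values
 */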
void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
        u64 msrval, guestval, hostval = x86_spec_ctrl_base;
        struct thread_info *ti = current_thread_info();

        /* Is MSR_SPEC_CTRL implemented ? */
        if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
                /*
                 * Restrict guest_spec_ctrl to supported values. Clear the
                 * modifiable bits in the host base value and OR in the
                 * modifiable bits from the guest value.
                 */
                guestval = hostval & ~x86_spec_ctrl_mask;
                guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;

                /* SSBD controlled in MSR_SPEC_CTRL */
                if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
                        hostval |= ssbd_tif_to_spec_ctrl(ti->flags);

                if (hostval != guestval) {
                        msrval = setguest ? guestval : hostval;
                        wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
                }
        }

        /*
         * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
         * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
         */
        if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
            !static_cpu_has(X86_FEATURE_VIRT_SSBD))
                return;

        /*
         * If the host has SSBD mitigation enabled, force it in the host's
         * virtual MSR value. If it is not permanently enabled, evaluate
         * current's TIF_SSBD thread flag.
         */
        if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
                hostval = SPEC_CTRL_SSBD;
        else
                hostval = ssbd_tif_to_spec_ctrl(ti->flags);

        /* Sanitize the guest value */
        guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

        if (hostval != guestval) {
                unsigned long tif;

                tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
                                 ssbd_spec_ctrl_to_tif(hostval);

                speculative_store_bypass_update(tif);
        }
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);

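/*
 * Force-enable Speculative Store Bypass Disable on AMD, preferring the
 * virtualized MSR_AMD64_VIRT_SPEC_CTRL interface when it is available and
 * falling back to the bare-metal MSR_AMD64_LS_CFG bit otherwise.
 */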
static void x86_amd_ssb_disable(void)
{
        u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

        if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
                wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
        else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
                wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

#ifdef RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
        if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
                return true;

        pr_err("System may be vulnerable to spectre v2\n");
        spectre_v2_bad_module = true;
        return false;
}

static inline const char *spectre_v2_module_string(void)
{
        return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

static void __init spec2_print_if_insecure(const char *reason)
{
        if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                pr_info("%s selected on command line.\n", reason);
}

static void __init spec2_print_if_secure(const char *reason)
{
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                pr_info("%s selected on command line.\n", reason);
}

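/*
 * True when the kernel itself was built with a retpoline-capable compiler
 * (RETPOLINE defined), i.e. the full retpoline rather than the minimal
 * ASM-only thunks.
 */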
static inline bool retp_compiler(void)
{
        return __is_defined(RETPOLINE);
}

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
        int len = strlen(opt);

        return len == arglen && !strncmp(arg, opt, len);
}

static const struct {
        const char *option;
        enum spectre_v2_mitigation_cmd cmd;
        bool secure;
} mitigation_options[] = {
        { "off",               SPECTRE_V2_CMD_NONE,              false },
        { "on",                SPECTRE_V2_CMD_FORCE,             true },
        { "retpoline",         SPECTRE_V2_CMD_RETPOLINE,         false },
        { "retpoline,amd",     SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
        { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
        { "auto",              SPECTRE_V2_CMD_AUTO,              false },
};

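/* Parse "nospectre_v2" and "spectre_v2=" from the kernel command line. */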
static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
        char arg[20];
        int ret, i;
        enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;

        if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
                return SPECTRE_V2_CMD_NONE;
        else {
                ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
                if (ret < 0)
                        return SPECTRE_V2_CMD_AUTO;

                for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
                        if (!match_option(arg, ret, mitigation_options[i].option))
                                continue;
                        cmd = mitigation_options[i].cmd;
                        break;
                }

                if (i >= ARRAY_SIZE(mitigation_options)) {
                        pr_err("unknown option (%s). Switching to AUTO select\n", arg);
                        return SPECTRE_V2_CMD_AUTO;
                }
        }

        if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
             cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
             cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
            !IS_ENABLED(CONFIG_RETPOLINE)) {
                pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
                return SPECTRE_V2_CMD_AUTO;
        }

        if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
            boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
                pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
                return SPECTRE_V2_CMD_AUTO;
        }

        if (mitigation_options[i].secure)
                spec2_print_if_secure(mitigation_options[i].option);
        else
                spec2_print_if_insecure(mitigation_options[i].option);

        return cmd;
}

static void __init spectre_v2_select_mitigation(void)
{
        enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
        enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

        /*
         * If the CPU is not affected and the command line mode is NONE or AUTO
         * then nothing to do.
         */
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
            (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
                return;

        switch (cmd) {
        case SPECTRE_V2_CMD_NONE:
                return;

        case SPECTRE_V2_CMD_FORCE:
        case SPECTRE_V2_CMD_AUTO:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;
        case SPECTRE_V2_CMD_RETPOLINE_AMD:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_amd;
                break;
        case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_generic;
                break;
        case SPECTRE_V2_CMD_RETPOLINE:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;
        }
        pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
        return;

retpoline_auto:
        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
        retpoline_amd:
                if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
                        pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
                        goto retpoline_generic;
                }
                mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
                                         SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        } else {
        retpoline_generic:
                mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
                                         SPECTRE_V2_RETPOLINE_MINIMAL;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        }

        spectre_v2_enabled = mode;
        pr_info("%s\n", spectre_v2_strings[mode]);

        /*
         * If spectre v2 protection has been enabled, unconditionally fill
         * RSB during a context switch; this protects against two independent
         * issues:
         *
         *      - RSB underflow (and switch to BTB) on Skylake+
         *      - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
         */
        setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
        pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

        /* Initialize Indirect Branch Prediction Barrier if supported */
        if (boot_cpu_has(X86_FEATURE_IBPB)) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
                pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
        }

        /*
         * Retpoline means the kernel is safe because it has no indirect
         * branches. But firmware isn't, so use IBRS to protect that.
         */
        if (boot_cpu_has(X86_FEATURE_IBRS)) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
                pr_info("Enabling Restricted Speculation for firmware calls\n");
        }
}

#undef pr_fmt
#define pr_fmt(fmt)     "Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
        SPEC_STORE_BYPASS_CMD_NONE,
        SPEC_STORE_BYPASS_CMD_AUTO,
        SPEC_STORE_BYPASS_CMD_ON,
        SPEC_STORE_BYPASS_CMD_PRCTL,
        SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char *ssb_strings[] = {
        [SPEC_STORE_BYPASS_NONE]        = "Vulnerable",
        [SPEC_STORE_BYPASS_DISABLE]     = "Mitigation: Speculative Store Bypass disabled",
        [SPEC_STORE_BYPASS_PRCTL]       = "Mitigation: Speculative Store Bypass disabled via prctl",
        [SPEC_STORE_BYPASS_SECCOMP]     = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
        const char *option;
        enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] = {
        { "auto",       SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
        { "on",         SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
        { "off",        SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
        { "prctl",      SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
        { "seccomp",    SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};

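/* Parse "nospec_store_bypass_disable" and "spec_store_bypass_disable=" from the kernel command line. */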
static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
        enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
        char arg[20];
        int ret, i;

        if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
                return SPEC_STORE_BYPASS_CMD_NONE;
        } else {
                ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
                                          arg, sizeof(arg));
                if (ret < 0)
                        return SPEC_STORE_BYPASS_CMD_AUTO;

                for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
                        if (!match_option(arg, ret, ssb_mitigation_options[i].option))
                                continue;

                        cmd = ssb_mitigation_options[i].cmd;
                        break;
                }

                if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
                        pr_err("unknown option (%s). Switching to AUTO select\n", arg);
                        return SPEC_STORE_BYPASS_CMD_AUTO;
                }
        }

        return cmd;
}

static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
        enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
        enum ssb_mitigation_cmd cmd;

        if (!boot_cpu_has(X86_FEATURE_SSBD))
                return mode;

        cmd = ssb_parse_cmdline();
        if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
            (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
             cmd == SPEC_STORE_BYPASS_CMD_AUTO))
                return mode;

        switch (cmd) {
        case SPEC_STORE_BYPASS_CMD_AUTO:
        case SPEC_STORE_BYPASS_CMD_SECCOMP:
                /*
                 * Choose prctl+seccomp as the default mode if seccomp is
                 * enabled.
                 */
                if (IS_ENABLED(CONFIG_SECCOMP))
                        mode = SPEC_STORE_BYPASS_SECCOMP;
                else
                        mode = SPEC_STORE_BYPASS_PRCTL;
                break;
        case SPEC_STORE_BYPASS_CMD_ON:
                mode = SPEC_STORE_BYPASS_DISABLE;
                break;
        case SPEC_STORE_BYPASS_CMD_PRCTL:
                mode = SPEC_STORE_BYPASS_PRCTL;
                break;
        case SPEC_STORE_BYPASS_CMD_NONE:
                break;
        }

        /*
         * We have three CPU feature flags that are in play here:
         *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
         *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
         *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
         */
        if (mode == SPEC_STORE_BYPASS_DISABLE) {
                setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
                /*
                 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
                 * a completely different MSR and bit dependent on family.
                 */
                switch (boot_cpu_data.x86_vendor) {
                case X86_VENDOR_INTEL:
                        x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
                        x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
                        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
                        break;
                case X86_VENDOR_AMD:
                        x86_amd_ssb_disable();
                        break;
                }
        }

        return mode;
}

static void ssb_select_mitigation(void)
{
        ssb_mode = __ssb_select_mitigation();

        if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                pr_info("%s\n", ssb_strings[ssb_mode]);
}

#undef pr_fmt
#define pr_fmt(fmt)     "Speculation prctl: " fmt

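/*
 * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, ...) backend:
 * enable, disable or permanently disable SSB for @task. Only valid when
 * the SSB mitigation mode is prctl or seccomp.
 */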
static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
        bool update;

        if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
            ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
                return -ENXIO;

        switch (ctrl) {
        case PR_SPEC_ENABLE:
                /* If speculation is force disabled, enable is not allowed */
                if (task_spec_ssb_force_disable(task))
                        return -EPERM;
                task_clear_spec_ssb_disable(task);
                update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
                break;
        case PR_SPEC_DISABLE:
                task_set_spec_ssb_disable(task);
                update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
                break;
        case PR_SPEC_FORCE_DISABLE:
                task_set_spec_ssb_disable(task);
                task_set_spec_ssb_force_disable(task);
                update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
                break;
        default:
                return -ERANGE;
        }

        /*
         * If being set on non-current task, delay setting the CPU
         * mitigation until it is next scheduled.
         */
        if (task == current && update)
                speculative_store_bypass_update_current();

        return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
                             unsigned long ctrl)
{
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssb_prctl_set(task, ctrl);
        default:
                return -ENODEV;
        }
}

#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
        if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
                ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif

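/*
 * Report the SSB state of @task for prctl(PR_GET_SPECULATION_CTRL,
 * PR_SPEC_STORE_BYPASS).
 */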
static int ssb_prctl_get(struct task_struct *task)
{
        switch (ssb_mode) {
        case SPEC_STORE_BYPASS_DISABLE:
                return PR_SPEC_DISABLE;
        case SPEC_STORE_BYPASS_SECCOMP:
        case SPEC_STORE_BYPASS_PRCTL:
                if (task_spec_ssb_force_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
                if (task_spec_ssb_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
                return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
        default:
                if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                        return PR_SPEC_ENABLE;
                return PR_SPEC_NOT_AFFECTED;
        }
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssb_prctl_get(task);
        default:
                return -ENODEV;
        }
}

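/*
 * Propagate the boot CPU's SPEC_CTRL and AMD SSBD settings to a secondary
 * CPU during bring-up.
 */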
void x86_spec_ctrl_setup_ap(void)
{
        if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

        if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
                x86_amd_ssb_disable();
}

#undef pr_fmt
#define pr_fmt(fmt)     "L1TF: " fmt
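/*
 * L1TF mitigation: enable PTE inversion (X86_FEATURE_L1TF_PTEINV) on
 * affected CPUs. The mitigation cannot help on non-PAE 32-bit kernels or
 * when physical memory extends beyond MAX_PA/2, so warn in those cases.
 */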
static void __init l1tf_select_mitigation(void)
{
        u64 half_pa;

        if (!boot_cpu_has_bug(X86_BUG_L1TF))
                return;

#if CONFIG_PGTABLE_LEVELS == 2
        pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
        return;
#endif

        /*
         * This is extremely unlikely to happen because on almost all
         * systems MAX_PA/2 is far larger than the amount of RAM that can
         * be fitted into DIMM slots.
         */
        half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
        if (e820_any_mapped(half_pa, ULLONG_MAX - half_pa, E820_RAM)) {
                pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
                return;
        }

        setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
}
#undef pr_fmt

#ifdef CONFIG_SYSFS

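/*
 * Common backend for the /sys/devices/system/cpu/vulnerabilities/* show
 * functions below.
 */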
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
                               char *buf, unsigned int bug)
{
        if (!boot_cpu_has_bug(bug))
                return sprintf(buf, "Not affected\n");

        switch (bug) {
        case X86_BUG_CPU_MELTDOWN:
                if (boot_cpu_has(X86_FEATURE_KAISER))
                        return sprintf(buf, "Mitigation: PTI\n");

                break;

        case X86_BUG_SPECTRE_V1:
                return sprintf(buf, "Mitigation: __user pointer sanitization\n");

        case X86_BUG_SPECTRE_V2:
                return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
                               boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
                               boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
                               spectre_v2_module_string());

        case X86_BUG_SPEC_STORE_BYPASS:
                return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

        case X86_BUG_L1TF:
                if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
                        return sprintf(buf, "Mitigation: Page Table Inversion\n");
                break;

        default:
                break;
        }

        return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}
#endif