GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/kernel/cpu/bugs.c
1
// SPDX-License-Identifier: GPL-2.0
2
/*
3
* Copyright (C) 1994 Linus Torvalds
4
*
5
* Cyrix stuff, June 1998 by:
6
* - Rafael R. Reilova (moved everything from head.S),
7
* <[email protected]>
8
* - Channing Corn (tests & fixes),
9
* - Andrew D. Balsa (code cleanup).
10
*/
11
#include <linux/init.h>
12
#include <linux/cpu.h>
13
#include <linux/module.h>
14
#include <linux/nospec.h>
15
#include <linux/prctl.h>
16
#include <linux/sched/smt.h>
17
#include <linux/pgtable.h>
18
#include <linux/bpf.h>
19
20
#include <asm/spec-ctrl.h>
21
#include <asm/cmdline.h>
22
#include <asm/bugs.h>
23
#include <asm/processor.h>
24
#include <asm/processor-flags.h>
25
#include <asm/fpu/api.h>
26
#include <asm/msr.h>
27
#include <asm/vmx.h>
28
#include <asm/paravirt.h>
29
#include <asm/cpu_device_id.h>
30
#include <asm/e820/api.h>
31
#include <asm/hypervisor.h>
32
#include <asm/tlbflush.h>
33
#include <asm/cpu.h>
34
35
#include "cpu.h"
36
37
/*
38
* Speculation Vulnerability Handling
39
*
40
* Each vulnerability is handled with the following functions:
41
* <vuln>_select_mitigation() -- Selects a mitigation to use. This should
42
* take into account all relevant command line
43
* options.
44
* <vuln>_update_mitigation() -- This is called after all vulnerabilities have
45
* selected a mitigation, in case the selection
46
* may want to change based on other choices
47
* made. This function is optional.
48
* <vuln>_apply_mitigation() -- Enable the selected mitigation.
49
*
50
* The compile-time mitigation in all cases should be AUTO. An explicit
51
* command-line option can override AUTO. If no such option is
52
* provided, <vuln>_select_mitigation() will override AUTO to the best
53
* mitigation option.
54
*/
55
56
static void __init spectre_v1_select_mitigation(void);
57
static void __init spectre_v1_apply_mitigation(void);
58
static void __init spectre_v2_select_mitigation(void);
59
static void __init spectre_v2_update_mitigation(void);
60
static void __init spectre_v2_apply_mitigation(void);
61
static void __init retbleed_select_mitigation(void);
62
static void __init retbleed_update_mitigation(void);
63
static void __init retbleed_apply_mitigation(void);
64
static void __init spectre_v2_user_select_mitigation(void);
65
static void __init spectre_v2_user_update_mitigation(void);
66
static void __init spectre_v2_user_apply_mitigation(void);
67
static void __init ssb_select_mitigation(void);
68
static void __init ssb_apply_mitigation(void);
69
static void __init l1tf_select_mitigation(void);
70
static void __init l1tf_apply_mitigation(void);
71
static void __init mds_select_mitigation(void);
72
static void __init mds_update_mitigation(void);
73
static void __init mds_apply_mitigation(void);
74
static void __init taa_select_mitigation(void);
75
static void __init taa_update_mitigation(void);
76
static void __init taa_apply_mitigation(void);
77
static void __init mmio_select_mitigation(void);
78
static void __init mmio_update_mitigation(void);
79
static void __init mmio_apply_mitigation(void);
80
static void __init rfds_select_mitigation(void);
81
static void __init rfds_update_mitigation(void);
82
static void __init rfds_apply_mitigation(void);
83
static void __init srbds_select_mitigation(void);
84
static void __init srbds_apply_mitigation(void);
85
static void __init l1d_flush_select_mitigation(void);
86
static void __init srso_select_mitigation(void);
87
static void __init srso_update_mitigation(void);
88
static void __init srso_apply_mitigation(void);
89
static void __init gds_select_mitigation(void);
90
static void __init gds_apply_mitigation(void);
91
static void __init bhi_select_mitigation(void);
92
static void __init bhi_update_mitigation(void);
93
static void __init bhi_apply_mitigation(void);
94
static void __init its_select_mitigation(void);
95
static void __init its_update_mitigation(void);
96
static void __init its_apply_mitigation(void);
97
static void __init tsa_select_mitigation(void);
98
static void __init tsa_apply_mitigation(void);
99
100
/* The base value of the SPEC_CTRL MSR without task-specific bits set */
101
u64 x86_spec_ctrl_base;
102
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
103
104
/* The current value of the SPEC_CTRL MSR with task-specific bits set */
105
DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
106
EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);
107
108
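/* MSR_IA32_PRED_CMD value used when a prediction barrier (IBPB) is issued */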
u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
109
110
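/* Cached copy of MSR_IA32_ARCH_CAPABILITIES, read once during mitigation selection */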
static u64 __ro_after_init x86_arch_cap_msr;
111
112
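/* Serializes mitigation updates that can happen after boot */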
static DEFINE_MUTEX(spec_ctrl_mutex);
113
114
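/* The active return thunk; defaults to __x86_return_thunk and is switched via set_return_thunk() */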
void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
115
116
static void __init set_return_thunk(void *thunk)
117
{
118
x86_return_thunk = thunk;
119
120
pr_info("active return thunk: %ps\n", thunk);
121
}
122
123
/* Update SPEC_CTRL MSR and its cached copy unconditionally */
124
static void update_spec_ctrl(u64 val)
125
{
126
this_cpu_write(x86_spec_ctrl_current, val);
127
wrmsrq(MSR_IA32_SPEC_CTRL, val);
128
}
129
130
/*
131
* Keep track of the SPEC_CTRL MSR value for the current task, which may differ
132
* from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
133
*/
134
void update_spec_ctrl_cond(u64 val)
135
{
136
if (this_cpu_read(x86_spec_ctrl_current) == val)
137
return;
138
139
this_cpu_write(x86_spec_ctrl_current, val);
140
141
/*
142
* When KERNEL_IBRS is enabled this MSR is written on return-to-user; unless
143
* forced, the update can be delayed until that time.
144
*/
145
if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
146
wrmsrq(MSR_IA32_SPEC_CTRL, val);
147
}
148
149
noinstr u64 spec_ctrl_current(void)
150
{
151
return this_cpu_read(x86_spec_ctrl_current);
152
}
153
EXPORT_SYMBOL_GPL(spec_ctrl_current);
154
155
/*
156
* AMD specific MSR info for Speculative Store Bypass control.
157
* x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
158
*/
159
u64 __ro_after_init x86_amd_ls_cfg_base;
160
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
161
162
/* Control conditional STIBP in switch_to() */
163
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
164
/* Control conditional IBPB in switch_mm() */
165
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
166
/* Control unconditional IBPB in switch_mm() */
167
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
168
169
/* Control IBPB on vCPU load */
170
DEFINE_STATIC_KEY_FALSE(switch_vcpu_ibpb);
171
EXPORT_SYMBOL_GPL(switch_vcpu_ibpb);
172
173
/* Control CPU buffer clear before idling (halt, mwait) */
174
DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
175
EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
176
177
/*
178
* Controls whether l1d flush based mitigations are enabled,
179
* based on hw features and admin setting via boot parameter;
180
* defaults to false.
181
*/
182
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
183
184
/*
185
* Controls CPU Fill buffer clear before VMenter. This is a subset of
186
* X86_FEATURE_CLEAR_CPU_BUF, and should only be enabled when KVM-only
187
* mitigation is required.
188
*/
189
DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
190
EXPORT_SYMBOL_GPL(cpu_buf_vm_clear);
191
192
#undef pr_fmt
193
#define pr_fmt(fmt) "mitigations: " fmt
194
195
static void __init cpu_print_attack_vectors(void)
196
{
197
pr_info("Enabled attack vectors: ");
198
199
if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL))
200
pr_cont("user_kernel, ");
201
202
if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER))
203
pr_cont("user_user, ");
204
205
if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST))
206
pr_cont("guest_host, ");
207
208
if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST))
209
pr_cont("guest_guest, ");
210
211
pr_cont("SMT mitigations: ");
212
213
switch (smt_mitigations) {
214
case SMT_MITIGATIONS_OFF:
215
pr_cont("off\n");
216
break;
217
case SMT_MITIGATIONS_AUTO:
218
pr_cont("auto\n");
219
break;
220
case SMT_MITIGATIONS_ON:
221
pr_cont("on\n");
222
}
223
}
224
225
void __init cpu_select_mitigations(void)
226
{
227
/*
228
* Read the SPEC_CTRL MSR to account for reserved bits which may
229
* have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
230
* init code as it is not enumerated and depends on the family.
231
*/
232
if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
233
rdmsrq(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
234
235
/*
236
* A previously running kernel (kexec) may have some controls
237
* turned ON. Clear them and let the mitigations setup below
238
* rediscover them based on configuration.
239
*/
240
x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
241
}
242
243
x86_arch_cap_msr = x86_read_arch_cap_msr();
244
245
cpu_print_attack_vectors();
246
247
/* Select the proper CPU mitigations before patching alternatives: */
248
spectre_v1_select_mitigation();
249
spectre_v2_select_mitigation();
250
retbleed_select_mitigation();
251
spectre_v2_user_select_mitigation();
252
ssb_select_mitigation();
253
l1tf_select_mitigation();
254
mds_select_mitigation();
255
taa_select_mitigation();
256
mmio_select_mitigation();
257
rfds_select_mitigation();
258
srbds_select_mitigation();
259
l1d_flush_select_mitigation();
260
srso_select_mitigation();
261
gds_select_mitigation();
262
its_select_mitigation();
263
bhi_select_mitigation();
264
tsa_select_mitigation();
265
266
/*
267
* After mitigations are selected, some may need to update their
268
* choices.
269
*/
270
spectre_v2_update_mitigation();
271
/*
272
* retbleed_update_mitigation() relies on the state set by
273
* spectre_v2_update_mitigation(); specifically it wants to know about
274
* spectre_v2=ibrs.
275
*/
276
retbleed_update_mitigation();
277
/*
278
* its_update_mitigation() depends on spectre_v2_update_mitigation()
279
* and retbleed_update_mitigation().
280
*/
281
its_update_mitigation();
282
283
/*
284
* spectre_v2_user_update_mitigation() depends on
285
* retbleed_update_mitigation(), specifically the STIBP
286
* selection is forced for UNRET or IBPB.
287
*/
288
spectre_v2_user_update_mitigation();
289
mds_update_mitigation();
290
taa_update_mitigation();
291
mmio_update_mitigation();
292
rfds_update_mitigation();
293
bhi_update_mitigation();
294
/* srso_update_mitigation() depends on retbleed_update_mitigation(). */
295
srso_update_mitigation();
296
297
spectre_v1_apply_mitigation();
298
spectre_v2_apply_mitigation();
299
retbleed_apply_mitigation();
300
spectre_v2_user_apply_mitigation();
301
ssb_apply_mitigation();
302
l1tf_apply_mitigation();
303
mds_apply_mitigation();
304
taa_apply_mitigation();
305
mmio_apply_mitigation();
306
rfds_apply_mitigation();
307
srbds_apply_mitigation();
308
srso_apply_mitigation();
309
gds_apply_mitigation();
310
its_apply_mitigation();
311
bhi_apply_mitigation();
312
tsa_apply_mitigation();
313
}
314
315
/*
316
* NOTE: This function is *only* called for SVM, since Intel uses
317
* MSR_IA32_SPEC_CTRL for SSBD.
318
*/
319
void
320
x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest)
321
{
322
u64 guestval, hostval;
323
struct thread_info *ti = current_thread_info();
324
325
/*
326
* If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
327
* MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
328
*/
329
if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
330
!static_cpu_has(X86_FEATURE_VIRT_SSBD))
331
return;
332
333
/*
334
* If the host has SSBD mitigation enabled, force it in the host's
335
* virtual MSR value. If it's not permanently enabled, evaluate
336
* current's TIF_SSBD thread flag.
337
*/
338
if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
339
hostval = SPEC_CTRL_SSBD;
340
else
341
hostval = ssbd_tif_to_spec_ctrl(ti->flags);
342
343
/* Sanitize the guest value */
344
guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
345
346
if (hostval != guestval) {
347
unsigned long tif;
348
349
tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
350
ssbd_spec_ctrl_to_tif(hostval);
351
352
speculation_ctrl_update(tif);
353
}
354
}
355
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
356
357
static void x86_amd_ssb_disable(void)
358
{
359
u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
360
361
if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
362
wrmsrq(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
363
else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
364
wrmsrq(MSR_AMD64_LS_CFG, msrval);
365
}
366
367
#undef pr_fmt
368
#define pr_fmt(fmt) "MDS: " fmt
369
370
/*
371
* Returns true if vulnerability should be mitigated based on the
372
* selected attack vector controls.
373
*
374
* See Documentation/admin-guide/hw-vuln/attack_vector_controls.rst
375
*/
376
static bool __init should_mitigate_vuln(unsigned int bug)
377
{
378
switch (bug) {
379
/*
380
* The only runtime-selected spectre_v1 mitigations in the kernel are
381
* related to SWAPGS protection on kernel entry. Therefore, protection
382
* is only required for the user->kernel attack vector.
383
*/
384
case X86_BUG_SPECTRE_V1:
385
return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL);
386
387
case X86_BUG_SPECTRE_V2:
388
case X86_BUG_RETBLEED:
389
case X86_BUG_L1TF:
390
case X86_BUG_ITS:
391
return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
392
cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST);
393
394
case X86_BUG_SPECTRE_V2_USER:
395
return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
396
cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST);
397
398
/*
399
* All the vulnerabilities below allow potentially leaking data
400
* across address spaces. Therefore, mitigation is required for
401
* any of these 4 attack vectors.
402
*/
403
case X86_BUG_MDS:
404
case X86_BUG_TAA:
405
case X86_BUG_MMIO_STALE_DATA:
406
case X86_BUG_RFDS:
407
case X86_BUG_SRBDS:
408
return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
409
cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
410
cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
411
cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST);
412
413
case X86_BUG_GDS:
414
return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
415
cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
416
cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
417
cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST) ||
418
(smt_mitigations != SMT_MITIGATIONS_OFF);
419
420
case X86_BUG_SPEC_STORE_BYPASS:
421
return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER);
422
423
default:
424
WARN(1, "Unknown bug %x\n", bug);
425
return false;
426
}
427
}
428
429
/* Default mitigation for MDS-affected CPUs */
430
static enum mds_mitigations mds_mitigation __ro_after_init =
431
IS_ENABLED(CONFIG_MITIGATION_MDS) ? MDS_MITIGATION_AUTO : MDS_MITIGATION_OFF;
432
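/* Set by the "mds=full,nosmt" command line option */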
static bool mds_nosmt __ro_after_init = false;
433
434
static const char * const mds_strings[] = {
435
[MDS_MITIGATION_OFF] = "Vulnerable",
436
[MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers",
437
[MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode",
438
};
439
440
enum taa_mitigations {
441
TAA_MITIGATION_OFF,
442
TAA_MITIGATION_AUTO,
443
TAA_MITIGATION_UCODE_NEEDED,
444
TAA_MITIGATION_VERW,
445
TAA_MITIGATION_TSX_DISABLED,
446
};
447
448
/* Default mitigation for TAA-affected CPUs */
449
static enum taa_mitigations taa_mitigation __ro_after_init =
450
IS_ENABLED(CONFIG_MITIGATION_TAA) ? TAA_MITIGATION_AUTO : TAA_MITIGATION_OFF;
451
452
enum mmio_mitigations {
453
MMIO_MITIGATION_OFF,
454
MMIO_MITIGATION_AUTO,
455
MMIO_MITIGATION_UCODE_NEEDED,
456
MMIO_MITIGATION_VERW,
457
};
458
459
/* Default mitigation for Processor MMIO Stale Data vulnerabilities */
460
static enum mmio_mitigations mmio_mitigation __ro_after_init =
461
IS_ENABLED(CONFIG_MITIGATION_MMIO_STALE_DATA) ? MMIO_MITIGATION_AUTO : MMIO_MITIGATION_OFF;
462
463
enum rfds_mitigations {
464
RFDS_MITIGATION_OFF,
465
RFDS_MITIGATION_AUTO,
466
RFDS_MITIGATION_VERW,
467
RFDS_MITIGATION_UCODE_NEEDED,
468
};
469
470
/* Default mitigation for Register File Data Sampling */
471
static enum rfds_mitigations rfds_mitigation __ro_after_init =
472
IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_AUTO : RFDS_MITIGATION_OFF;
473
474
/*
475
* Set if any of MDS/TAA/MMIO/RFDS are going to enable VERW clearing
476
* through X86_FEATURE_CLEAR_CPU_BUF on kernel and guest entry.
477
*/
478
static bool verw_clear_cpu_buf_mitigation_selected __ro_after_init;
479
480
static void __init mds_select_mitigation(void)
481
{
482
if (!boot_cpu_has_bug(X86_BUG_MDS)) {
483
mds_mitigation = MDS_MITIGATION_OFF;
484
return;
485
}
486
487
if (mds_mitigation == MDS_MITIGATION_AUTO) {
488
if (should_mitigate_vuln(X86_BUG_MDS))
489
mds_mitigation = MDS_MITIGATION_FULL;
490
else
491
mds_mitigation = MDS_MITIGATION_OFF;
492
}
493
494
if (mds_mitigation == MDS_MITIGATION_OFF)
495
return;
496
497
verw_clear_cpu_buf_mitigation_selected = true;
498
}
499
500
static void __init mds_update_mitigation(void)
501
{
502
if (!boot_cpu_has_bug(X86_BUG_MDS))
503
return;
504
505
/* If TAA, MMIO, or RFDS are being mitigated, MDS gets mitigated too. */
506
if (verw_clear_cpu_buf_mitigation_selected)
507
mds_mitigation = MDS_MITIGATION_FULL;
508
509
if (mds_mitigation == MDS_MITIGATION_FULL) {
510
if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
511
mds_mitigation = MDS_MITIGATION_VMWERV;
512
}
513
514
pr_info("%s\n", mds_strings[mds_mitigation]);
515
}
516
517
static void __init mds_apply_mitigation(void)
518
{
519
if (mds_mitigation == MDS_MITIGATION_FULL ||
520
mds_mitigation == MDS_MITIGATION_VMWERV) {
521
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
522
if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
523
(mds_nosmt || smt_mitigations == SMT_MITIGATIONS_ON))
524
cpu_smt_disable(false);
525
}
526
}
527
528
static int __init mds_cmdline(char *str)
529
{
530
if (!boot_cpu_has_bug(X86_BUG_MDS))
531
return 0;
532
533
if (!str)
534
return -EINVAL;
535
536
if (!strcmp(str, "off"))
537
mds_mitigation = MDS_MITIGATION_OFF;
538
else if (!strcmp(str, "full"))
539
mds_mitigation = MDS_MITIGATION_FULL;
540
else if (!strcmp(str, "full,nosmt")) {
541
mds_mitigation = MDS_MITIGATION_FULL;
542
mds_nosmt = true;
543
}
544
545
return 0;
546
}
547
early_param("mds", mds_cmdline);
548
549
#undef pr_fmt
550
#define pr_fmt(fmt) "TAA: " fmt
551
552
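/* Set by the "tsx_async_abort=full,nosmt" command line option */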
static bool taa_nosmt __ro_after_init;
553
554
static const char * const taa_strings[] = {
555
[TAA_MITIGATION_OFF] = "Vulnerable",
556
[TAA_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
557
[TAA_MITIGATION_VERW] = "Mitigation: Clear CPU buffers",
558
[TAA_MITIGATION_TSX_DISABLED] = "Mitigation: TSX disabled",
559
};
560
561
static bool __init taa_vulnerable(void)
562
{
563
return boot_cpu_has_bug(X86_BUG_TAA) && boot_cpu_has(X86_FEATURE_RTM);
564
}
565
566
static void __init taa_select_mitigation(void)
567
{
568
if (!boot_cpu_has_bug(X86_BUG_TAA)) {
569
taa_mitigation = TAA_MITIGATION_OFF;
570
return;
571
}
572
573
/* TSX previously disabled by tsx=off */
574
if (!boot_cpu_has(X86_FEATURE_RTM)) {
575
taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
576
return;
577
}
578
579
/* Microcode will be checked in taa_update_mitigation(). */
580
if (taa_mitigation == TAA_MITIGATION_AUTO) {
581
if (should_mitigate_vuln(X86_BUG_TAA))
582
taa_mitigation = TAA_MITIGATION_VERW;
583
else
584
taa_mitigation = TAA_MITIGATION_OFF;
585
}
586
587
if (taa_mitigation != TAA_MITIGATION_OFF)
588
verw_clear_cpu_buf_mitigation_selected = true;
589
}
590
591
static void __init taa_update_mitigation(void)
592
{
593
if (!taa_vulnerable())
594
return;
595
596
if (verw_clear_cpu_buf_mitigation_selected)
597
taa_mitigation = TAA_MITIGATION_VERW;
598
599
if (taa_mitigation == TAA_MITIGATION_VERW) {
600
/* Check if the requisite ucode is available. */
601
if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
602
taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
603
604
/*
605
* VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
606
* A microcode update fixes this behavior to clear CPU buffers. It also
607
* adds support for MSR_IA32_TSX_CTRL which is enumerated by the
608
* ARCH_CAP_TSX_CTRL_MSR bit.
609
*
610
* On MDS_NO=1 CPUs, if ARCH_CAP_TSX_CTRL_MSR is not set, a microcode
611
* update is required.
612
*/
613
if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
614
!(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
615
taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
616
}
617
618
pr_info("%s\n", taa_strings[taa_mitigation]);
619
}
620
621
static void __init taa_apply_mitigation(void)
622
{
623
if (taa_mitigation == TAA_MITIGATION_VERW ||
624
taa_mitigation == TAA_MITIGATION_UCODE_NEEDED) {
625
/*
626
* TSX is enabled, select alternate mitigation for TAA which is
627
* the same as MDS. Enable MDS static branch to clear CPU buffers.
628
*
629
* For guests that can't determine whether the correct microcode is
630
* present on host, enable the mitigation for UCODE_NEEDED as well.
631
*/
632
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
633
634
if (taa_nosmt || smt_mitigations == SMT_MITIGATIONS_ON)
635
cpu_smt_disable(false);
636
}
637
}
638
639
static int __init tsx_async_abort_parse_cmdline(char *str)
640
{
641
if (!boot_cpu_has_bug(X86_BUG_TAA))
642
return 0;
643
644
if (!str)
645
return -EINVAL;
646
647
if (!strcmp(str, "off")) {
648
taa_mitigation = TAA_MITIGATION_OFF;
649
} else if (!strcmp(str, "full")) {
650
taa_mitigation = TAA_MITIGATION_VERW;
651
} else if (!strcmp(str, "full,nosmt")) {
652
taa_mitigation = TAA_MITIGATION_VERW;
653
taa_nosmt = true;
654
}
655
656
return 0;
657
}
658
early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
659
660
#undef pr_fmt
661
#define pr_fmt(fmt) "MMIO Stale Data: " fmt
662
663
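/* Set by the "mmio_stale_data=full,nosmt" command line option */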
static bool mmio_nosmt __ro_after_init = false;
664
665
static const char * const mmio_strings[] = {
666
[MMIO_MITIGATION_OFF] = "Vulnerable",
667
[MMIO_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
668
[MMIO_MITIGATION_VERW] = "Mitigation: Clear CPU buffers",
669
};
670
671
static void __init mmio_select_mitigation(void)
672
{
673
if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
674
cpu_mitigations_off()) {
675
mmio_mitigation = MMIO_MITIGATION_OFF;
676
return;
677
}
678
679
/* Microcode will be checked in mmio_update_mitigation(). */
680
if (mmio_mitigation == MMIO_MITIGATION_AUTO) {
681
if (should_mitigate_vuln(X86_BUG_MMIO_STALE_DATA))
682
mmio_mitigation = MMIO_MITIGATION_VERW;
683
else
684
mmio_mitigation = MMIO_MITIGATION_OFF;
685
}
686
687
if (mmio_mitigation == MMIO_MITIGATION_OFF)
688
return;
689
690
/*
691
* Enable CPU buffer clear mitigation for host and VMM, if also affected
692
* by MDS or TAA.
693
*/
694
if (boot_cpu_has_bug(X86_BUG_MDS) || taa_vulnerable())
695
verw_clear_cpu_buf_mitigation_selected = true;
696
}
697
698
static void __init mmio_update_mitigation(void)
699
{
700
if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
701
return;
702
703
if (verw_clear_cpu_buf_mitigation_selected)
704
mmio_mitigation = MMIO_MITIGATION_VERW;
705
706
if (mmio_mitigation == MMIO_MITIGATION_VERW) {
707
/*
708
* Check if the system has the right microcode.
709
*
710
* CPU Fill buffer clear mitigation is enumerated by either an explicit
711
* FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
712
* affected systems.
713
*/
714
if (!((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
715
(boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
716
boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
717
!(x86_arch_cap_msr & ARCH_CAP_MDS_NO))))
718
mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
719
}
720
721
pr_info("%s\n", mmio_strings[mmio_mitigation]);
722
}
723
724
static void __init mmio_apply_mitigation(void)
725
{
726
if (mmio_mitigation == MMIO_MITIGATION_OFF)
727
return;
728
729
/*
730
* Only enable the VMM mitigation if the CPU buffer clear mitigation is
731
* not being used.
732
*/
733
if (verw_clear_cpu_buf_mitigation_selected) {
734
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
735
static_branch_disable(&cpu_buf_vm_clear);
736
} else {
737
static_branch_enable(&cpu_buf_vm_clear);
738
}
739
740
/*
741
* If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
742
* be propagated to uncore buffers, clearing the Fill buffers on idle
743
* is required irrespective of SMT state.
744
*/
745
if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
746
static_branch_enable(&cpu_buf_idle_clear);
747
748
if (mmio_nosmt || smt_mitigations == SMT_MITIGATIONS_ON)
749
cpu_smt_disable(false);
750
}
751
752
static int __init mmio_stale_data_parse_cmdline(char *str)
753
{
754
if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
755
return 0;
756
757
if (!str)
758
return -EINVAL;
759
760
if (!strcmp(str, "off")) {
761
mmio_mitigation = MMIO_MITIGATION_OFF;
762
} else if (!strcmp(str, "full")) {
763
mmio_mitigation = MMIO_MITIGATION_VERW;
764
} else if (!strcmp(str, "full,nosmt")) {
765
mmio_mitigation = MMIO_MITIGATION_VERW;
766
mmio_nosmt = true;
767
}
768
769
return 0;
770
}
771
early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);
772
773
#undef pr_fmt
774
#define pr_fmt(fmt) "Register File Data Sampling: " fmt
775
776
static const char * const rfds_strings[] = {
777
[RFDS_MITIGATION_OFF] = "Vulnerable",
778
[RFDS_MITIGATION_VERW] = "Mitigation: Clear Register File",
779
[RFDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
780
};
781
782
static inline bool __init verw_clears_cpu_reg_file(void)
783
{
784
return (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR);
785
}
786
787
static void __init rfds_select_mitigation(void)
788
{
789
if (!boot_cpu_has_bug(X86_BUG_RFDS)) {
790
rfds_mitigation = RFDS_MITIGATION_OFF;
791
return;
792
}
793
794
if (rfds_mitigation == RFDS_MITIGATION_AUTO) {
795
if (should_mitigate_vuln(X86_BUG_RFDS))
796
rfds_mitigation = RFDS_MITIGATION_VERW;
797
else
798
rfds_mitigation = RFDS_MITIGATION_OFF;
799
}
800
801
if (rfds_mitigation == RFDS_MITIGATION_OFF)
802
return;
803
804
if (verw_clears_cpu_reg_file())
805
verw_clear_cpu_buf_mitigation_selected = true;
806
}
807
808
static void __init rfds_update_mitigation(void)
809
{
810
if (!boot_cpu_has_bug(X86_BUG_RFDS))
811
return;
812
813
if (verw_clear_cpu_buf_mitigation_selected)
814
rfds_mitigation = RFDS_MITIGATION_VERW;
815
816
if (rfds_mitigation == RFDS_MITIGATION_VERW) {
817
if (!verw_clears_cpu_reg_file())
818
rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
819
}
820
821
pr_info("%s\n", rfds_strings[rfds_mitigation]);
822
}
823
824
static void __init rfds_apply_mitigation(void)
825
{
826
if (rfds_mitigation == RFDS_MITIGATION_VERW)
827
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
828
}
829
830
static __init int rfds_parse_cmdline(char *str)
831
{
832
if (!str)
833
return -EINVAL;
834
835
if (!boot_cpu_has_bug(X86_BUG_RFDS))
836
return 0;
837
838
if (!strcmp(str, "off"))
839
rfds_mitigation = RFDS_MITIGATION_OFF;
840
else if (!strcmp(str, "on"))
841
rfds_mitigation = RFDS_MITIGATION_VERW;
842
843
return 0;
844
}
845
early_param("reg_file_data_sampling", rfds_parse_cmdline);
846
847
#undef pr_fmt
848
#define pr_fmt(fmt) "SRBDS: " fmt
849
850
enum srbds_mitigations {
851
SRBDS_MITIGATION_OFF,
852
SRBDS_MITIGATION_AUTO,
853
SRBDS_MITIGATION_UCODE_NEEDED,
854
SRBDS_MITIGATION_FULL,
855
SRBDS_MITIGATION_TSX_OFF,
856
SRBDS_MITIGATION_HYPERVISOR,
857
};
858
859
static enum srbds_mitigations srbds_mitigation __ro_after_init =
860
IS_ENABLED(CONFIG_MITIGATION_SRBDS) ? SRBDS_MITIGATION_AUTO : SRBDS_MITIGATION_OFF;
861
862
static const char * const srbds_strings[] = {
863
[SRBDS_MITIGATION_OFF] = "Vulnerable",
864
[SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
865
[SRBDS_MITIGATION_FULL] = "Mitigation: Microcode",
866
[SRBDS_MITIGATION_TSX_OFF] = "Mitigation: TSX disabled",
867
[SRBDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status",
868
};
869
870
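/* Set by the "srbds=off" command line option */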
static bool srbds_off;
871
872
void update_srbds_msr(void)
873
{
874
u64 mcu_ctrl;
875
876
if (!boot_cpu_has_bug(X86_BUG_SRBDS))
877
return;
878
879
if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
880
return;
881
882
if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
883
return;
884
885
/*
886
* An MDS_NO CPU for which SRBDS mitigation is not needed due to TSX
888
* being disabled may not have received the SRBDS MSR microcode.
888
*/
889
if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
890
return;
891
892
rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
893
894
switch (srbds_mitigation) {
895
case SRBDS_MITIGATION_OFF:
896
case SRBDS_MITIGATION_TSX_OFF:
897
mcu_ctrl |= RNGDS_MITG_DIS;
898
break;
899
case SRBDS_MITIGATION_FULL:
900
mcu_ctrl &= ~RNGDS_MITG_DIS;
901
break;
902
default:
903
break;
904
}
905
906
wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
907
}
908
909
static void __init srbds_select_mitigation(void)
910
{
911
if (!boot_cpu_has_bug(X86_BUG_SRBDS)) {
912
srbds_mitigation = SRBDS_MITIGATION_OFF;
913
return;
914
}
915
916
if (srbds_mitigation == SRBDS_MITIGATION_AUTO) {
917
if (should_mitigate_vuln(X86_BUG_SRBDS))
918
srbds_mitigation = SRBDS_MITIGATION_FULL;
919
else {
920
srbds_mitigation = SRBDS_MITIGATION_OFF;
921
return;
922
}
923
}
924
925
/*
926
* Check to see if this is one of the MDS_NO systems supporting TSX that
927
* are only exposed to SRBDS when TSX is enabled or when CPU is affected
928
* by Processor MMIO Stale Data vulnerability.
929
*/
930
if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
931
!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
932
srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
933
else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
934
srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
935
else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
936
srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
937
else if (srbds_off)
938
srbds_mitigation = SRBDS_MITIGATION_OFF;
939
940
pr_info("%s\n", srbds_strings[srbds_mitigation]);
941
}
942
943
static void __init srbds_apply_mitigation(void)
944
{
945
update_srbds_msr();
946
}
947
948
static int __init srbds_parse_cmdline(char *str)
949
{
950
if (!str)
951
return -EINVAL;
952
953
if (!boot_cpu_has_bug(X86_BUG_SRBDS))
954
return 0;
955
956
srbds_off = !strcmp(str, "off");
957
return 0;
958
}
959
early_param("srbds", srbds_parse_cmdline);
960
961
#undef pr_fmt
962
#define pr_fmt(fmt) "L1D Flush : " fmt
963
964
enum l1d_flush_mitigations {
965
L1D_FLUSH_OFF = 0,
966
L1D_FLUSH_ON,
967
};
968
969
static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;
970
971
static void __init l1d_flush_select_mitigation(void)
972
{
973
if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
974
return;
975
976
static_branch_enable(&switch_mm_cond_l1d_flush);
977
pr_info("Conditional flush on switch_mm() enabled\n");
978
}
979
980
static int __init l1d_flush_parse_cmdline(char *str)
981
{
982
if (!strcmp(str, "on"))
983
l1d_flush_mitigation = L1D_FLUSH_ON;
984
985
return 0;
986
}
987
early_param("l1d_flush", l1d_flush_parse_cmdline);
988
989
#undef pr_fmt
990
#define pr_fmt(fmt) "GDS: " fmt
991
992
enum gds_mitigations {
993
GDS_MITIGATION_OFF,
994
GDS_MITIGATION_AUTO,
995
GDS_MITIGATION_UCODE_NEEDED,
996
GDS_MITIGATION_FORCE,
997
GDS_MITIGATION_FULL,
998
GDS_MITIGATION_FULL_LOCKED,
999
GDS_MITIGATION_HYPERVISOR,
1000
};
1001
1002
static enum gds_mitigations gds_mitigation __ro_after_init =
1003
IS_ENABLED(CONFIG_MITIGATION_GDS) ? GDS_MITIGATION_AUTO : GDS_MITIGATION_OFF;
1004
1005
static const char * const gds_strings[] = {
1006
[GDS_MITIGATION_OFF] = "Vulnerable",
1007
[GDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
1008
[GDS_MITIGATION_FORCE] = "Mitigation: AVX disabled, no microcode",
1009
[GDS_MITIGATION_FULL] = "Mitigation: Microcode",
1010
[GDS_MITIGATION_FULL_LOCKED] = "Mitigation: Microcode (locked)",
1011
[GDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status",
1012
};
1013
1014
bool gds_ucode_mitigated(void)
1015
{
1016
return (gds_mitigation == GDS_MITIGATION_FULL ||
1017
gds_mitigation == GDS_MITIGATION_FULL_LOCKED);
1018
}
1019
EXPORT_SYMBOL_GPL(gds_ucode_mitigated);
1020
1021
void update_gds_msr(void)
1022
{
1023
u64 mcu_ctrl_after;
1024
u64 mcu_ctrl;
1025
1026
switch (gds_mitigation) {
1027
case GDS_MITIGATION_OFF:
1028
rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
1029
mcu_ctrl |= GDS_MITG_DIS;
1030
break;
1031
case GDS_MITIGATION_FULL_LOCKED:
1032
/*
1033
* The LOCKED state comes from the boot CPU. APs might not have
1034
* the same state. Make sure the mitigation is enabled on all
1035
* CPUs.
1036
*/
1037
case GDS_MITIGATION_FULL:
1038
rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
1039
mcu_ctrl &= ~GDS_MITG_DIS;
1040
break;
1041
case GDS_MITIGATION_FORCE:
1042
case GDS_MITIGATION_UCODE_NEEDED:
1043
case GDS_MITIGATION_HYPERVISOR:
1044
case GDS_MITIGATION_AUTO:
1045
return;
1046
}
1047
1048
wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
1049
1050
/*
1051
* Check to make sure that the WRMSR value was not ignored. Writes to
1052
* GDS_MITG_DIS will be ignored if this processor is locked but the boot
1053
* processor was not.
1054
*/
1055
rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after);
1056
WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after);
1057
}
1058
1059
static void __init gds_select_mitigation(void)
1060
{
1061
u64 mcu_ctrl;
1062
1063
if (!boot_cpu_has_bug(X86_BUG_GDS))
1064
return;
1065
1066
if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
1067
gds_mitigation = GDS_MITIGATION_HYPERVISOR;
1068
return;
1069
}
1070
1071
/* Will verify below that mitigation _can_ be disabled */
1072
if (gds_mitigation == GDS_MITIGATION_AUTO) {
1073
if (should_mitigate_vuln(X86_BUG_GDS))
1074
gds_mitigation = GDS_MITIGATION_FULL;
1075
else
1076
gds_mitigation = GDS_MITIGATION_OFF;
1077
}
1078
1079
/* No microcode */
1080
if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) {
1081
if (gds_mitigation != GDS_MITIGATION_FORCE)
1082
gds_mitigation = GDS_MITIGATION_UCODE_NEEDED;
1083
return;
1084
}
1085
1086
/* Microcode has mitigation, use it */
1087
if (gds_mitigation == GDS_MITIGATION_FORCE)
1088
gds_mitigation = GDS_MITIGATION_FULL;
1089
1090
rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
1091
if (mcu_ctrl & GDS_MITG_LOCKED) {
1092
if (gds_mitigation == GDS_MITIGATION_OFF)
1093
pr_warn("Mitigation locked. Disable failed.\n");
1094
1095
/*
1096
* The mitigation is selected from the boot CPU. All other CPUs
1097
* _should_ have the same state. If the boot CPU isn't locked
1098
* but others are then update_gds_msr() will WARN() of the state
1099
* mismatch. If the boot CPU is locked update_gds_msr() will
1100
* ensure the other CPUs have the mitigation enabled.
1101
*/
1102
gds_mitigation = GDS_MITIGATION_FULL_LOCKED;
1103
}
1104
}
1105
1106
static void __init gds_apply_mitigation(void)
1107
{
1108
if (!boot_cpu_has_bug(X86_BUG_GDS))
1109
return;
1110
1111
/* Microcode is present */
1112
if (x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)
1113
update_gds_msr();
1114
else if (gds_mitigation == GDS_MITIGATION_FORCE) {
1115
/*
1116
* This only needs to be done on the boot CPU so do it
1117
* here rather than in update_gds_msr()
1118
*/
1119
setup_clear_cpu_cap(X86_FEATURE_AVX);
1120
pr_warn("Microcode update needed! Disabling AVX as mitigation.\n");
1121
}
1122
1123
pr_info("%s\n", gds_strings[gds_mitigation]);
1124
}
1125
1126
static int __init gds_parse_cmdline(char *str)
1127
{
1128
if (!str)
1129
return -EINVAL;
1130
1131
if (!boot_cpu_has_bug(X86_BUG_GDS))
1132
return 0;
1133
1134
if (!strcmp(str, "off"))
1135
gds_mitigation = GDS_MITIGATION_OFF;
1136
else if (!strcmp(str, "force"))
1137
gds_mitigation = GDS_MITIGATION_FORCE;
1138
1139
return 0;
1140
}
1141
early_param("gather_data_sampling", gds_parse_cmdline);
1142
1143
#undef pr_fmt
1144
#define pr_fmt(fmt) "Spectre V1 : " fmt
1145
1146
enum spectre_v1_mitigation {
1147
SPECTRE_V1_MITIGATION_NONE,
1148
SPECTRE_V1_MITIGATION_AUTO,
1149
};
1150
1151
static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
1152
IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V1) ?
1153
SPECTRE_V1_MITIGATION_AUTO : SPECTRE_V1_MITIGATION_NONE;
1154
1155
static const char * const spectre_v1_strings[] = {
1156
[SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
1157
[SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
1158
};
1159
1160
/*
1161
* Does SMAP provide full mitigation against speculative kernel access to
1162
* userspace?
1163
*/
1164
static bool smap_works_speculatively(void)
1165
{
1166
if (!boot_cpu_has(X86_FEATURE_SMAP))
1167
return false;
1168
1169
/*
1170
* On CPUs which are vulnerable to Meltdown, SMAP does not
1171
* prevent speculative access to user data in the L1 cache.
1172
* Consider SMAP to be non-functional as a mitigation on these
1173
* CPUs.
1174
*/
1175
if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
1176
return false;
1177
1178
return true;
1179
}
1180
1181
static void __init spectre_v1_select_mitigation(void)
1182
{
1183
if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
1184
spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
1185
1186
if (!should_mitigate_vuln(X86_BUG_SPECTRE_V1))
1187
spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
1188
}
1189
1190
static void __init spectre_v1_apply_mitigation(void)
1191
{
1192
if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
1193
return;
1194
1195
if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
1196
/*
1197
* With Spectre v1, a user can speculatively control either
1198
* path of a conditional swapgs with a user-controlled GS
1199
* value. The mitigation is to add lfences to both code paths.
1200
*
1201
* If FSGSBASE is enabled, the user can put a kernel address in
1202
* GS, in which case SMAP provides no protection.
1203
*
1204
* If FSGSBASE is disabled, the user can only put a user space
1205
* address in GS. That makes an attack harder, but still
1206
* possible if there's no SMAP protection.
1207
*/
1208
if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
1209
!smap_works_speculatively()) {
1210
/*
1211
* Mitigation can be provided from SWAPGS itself or
1212
* PTI as the CR3 write in the Meltdown mitigation
1213
* is serializing.
1214
*
1215
* If neither is there, mitigate with an LFENCE to
1216
* stop speculation through swapgs.
1217
*/
1218
if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
1219
!boot_cpu_has(X86_FEATURE_PTI))
1220
setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
1221
1222
/*
1223
* Enable lfences in the kernel entry (non-swapgs)
1224
* paths, to prevent user entry from speculatively
1225
* skipping swapgs.
1226
*/
1227
setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
1228
}
1229
}
1230
1231
pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
1232
}
1233
1234
static int __init nospectre_v1_cmdline(char *str)
1235
{
1236
spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
1237
return 0;
1238
}
1239
early_param("nospectre_v1", nospectre_v1_cmdline);
1240
1241
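/* The active Spectre v2 mitigation */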
enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE;
1242
1243
/* Depends on spectre_v2 mitigation selected already */
1244
static inline bool cdt_possible(enum spectre_v2_mitigation mode)
1245
{
1246
if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING) ||
1247
!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE))
1248
return false;
1249
1250
if (mode == SPECTRE_V2_RETPOLINE ||
1251
mode == SPECTRE_V2_EIBRS_RETPOLINE)
1252
return true;
1253
1254
return false;
1255
}
1256
1257
#undef pr_fmt
1258
#define pr_fmt(fmt) "RETBleed: " fmt
1259
1260
enum its_mitigation {
1261
ITS_MITIGATION_OFF,
1262
ITS_MITIGATION_AUTO,
1263
ITS_MITIGATION_VMEXIT_ONLY,
1264
ITS_MITIGATION_ALIGNED_THUNKS,
1265
ITS_MITIGATION_RETPOLINE_STUFF,
1266
};
1267
1268
static enum its_mitigation its_mitigation __ro_after_init =
1269
IS_ENABLED(CONFIG_MITIGATION_ITS) ? ITS_MITIGATION_AUTO : ITS_MITIGATION_OFF;
1270
1271
enum retbleed_mitigation {
1272
RETBLEED_MITIGATION_NONE,
1273
RETBLEED_MITIGATION_AUTO,
1274
RETBLEED_MITIGATION_UNRET,
1275
RETBLEED_MITIGATION_IBPB,
1276
RETBLEED_MITIGATION_IBRS,
1277
RETBLEED_MITIGATION_EIBRS,
1278
RETBLEED_MITIGATION_STUFF,
1279
};
1280
1281
static const char * const retbleed_strings[] = {
1282
[RETBLEED_MITIGATION_NONE] = "Vulnerable",
1283
[RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk",
1284
[RETBLEED_MITIGATION_IBPB] = "Mitigation: IBPB",
1285
[RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS",
1286
[RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS",
1287
[RETBLEED_MITIGATION_STUFF] = "Mitigation: Stuffing",
1288
};
1289
1290
static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
1291
IS_ENABLED(CONFIG_MITIGATION_RETBLEED) ? RETBLEED_MITIGATION_AUTO : RETBLEED_MITIGATION_NONE;
1292
1293
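/* Set by the "retbleed=nosmt" command line option */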
static int __ro_after_init retbleed_nosmt = false;
1294
1295
enum srso_mitigation {
1296
SRSO_MITIGATION_NONE,
1297
SRSO_MITIGATION_AUTO,
1298
SRSO_MITIGATION_UCODE_NEEDED,
1299
SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED,
1300
SRSO_MITIGATION_MICROCODE,
1301
SRSO_MITIGATION_NOSMT,
1302
SRSO_MITIGATION_SAFE_RET,
1303
SRSO_MITIGATION_IBPB,
1304
SRSO_MITIGATION_IBPB_ON_VMEXIT,
1305
SRSO_MITIGATION_BP_SPEC_REDUCE,
1306
};
1307
1308
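/* SRSO mitigation state; AUTO is resolved during mitigation selection */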
static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_AUTO;
1309
1310
static int __init retbleed_parse_cmdline(char *str)
1311
{
1312
if (!str)
1313
return -EINVAL;
1314
1315
while (str) {
1316
char *next = strchr(str, ',');
1317
if (next) {
1318
*next = 0;
1319
next++;
1320
}
1321
1322
if (!strcmp(str, "off")) {
1323
retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1324
} else if (!strcmp(str, "auto")) {
1325
retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1326
} else if (!strcmp(str, "unret")) {
1327
retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
1328
} else if (!strcmp(str, "ibpb")) {
1329
retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
1330
} else if (!strcmp(str, "stuff")) {
1331
retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
1332
} else if (!strcmp(str, "nosmt")) {
1333
retbleed_nosmt = true;
1334
} else if (!strcmp(str, "force")) {
1335
setup_force_cpu_bug(X86_BUG_RETBLEED);
1336
} else {
1337
pr_err("Ignoring unknown retbleed option (%s).", str);
1338
}
1339
1340
str = next;
1341
}
1342
1343
return 0;
1344
}
1345
early_param("retbleed", retbleed_parse_cmdline);
1346
1347
#define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
1348
#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
1349
1350
static void __init retbleed_select_mitigation(void)
1351
{
1352
if (!boot_cpu_has_bug(X86_BUG_RETBLEED)) {
1353
retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1354
return;
1355
}
1356
1357
switch (retbleed_mitigation) {
1358
case RETBLEED_MITIGATION_UNRET:
1359
if (!IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) {
1360
retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1361
pr_err("WARNING: kernel not compiled with MITIGATION_UNRET_ENTRY.\n");
1362
}
1363
break;
1364
case RETBLEED_MITIGATION_IBPB:
1365
if (!boot_cpu_has(X86_FEATURE_IBPB)) {
1366
pr_err("WARNING: CPU does not support IBPB.\n");
1367
retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1368
} else if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
1369
pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
1370
retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1371
}
1372
break;
1373
case RETBLEED_MITIGATION_STUFF:
1374
if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) {
1375
pr_err("WARNING: kernel not compiled with MITIGATION_CALL_DEPTH_TRACKING.\n");
1376
retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1377
} else if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
1378
pr_err("WARNING: retbleed=stuff only supported for Intel CPUs.\n");
1379
retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1380
}
1381
break;
1382
default:
1383
break;
1384
}
1385
1386
if (retbleed_mitigation != RETBLEED_MITIGATION_AUTO)
1387
return;
1388
1389
if (!should_mitigate_vuln(X86_BUG_RETBLEED)) {
1390
retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1391
return;
1392
}
1393
1394
/* Intel mitigation selected in retbleed_update_mitigation() */
1395
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
1396
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
1397
if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY))
1398
retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
1399
else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY) &&
1400
boot_cpu_has(X86_FEATURE_IBPB))
1401
retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
1402
else
1403
retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1404
} else if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
1405
/* Final mitigation depends on spectre-v2 selection */
1406
if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
1407
retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
1408
else if (boot_cpu_has(X86_FEATURE_IBRS))
1409
retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
1410
else
1411
retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1412
}
1413
}
1414
1415
static void __init retbleed_update_mitigation(void)
1416
{
1417
if (!boot_cpu_has_bug(X86_BUG_RETBLEED))
1418
return;
1419
1420
/* ITS can also enable stuffing */
1421
if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF)
1422
retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
1423
1424
/* If SRSO is using IBPB, that works for retbleed too */
1425
if (srso_mitigation == SRSO_MITIGATION_IBPB)
1426
retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
1427
1428
if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF &&
1429
!cdt_possible(spectre_v2_enabled)) {
1430
pr_err("WARNING: retbleed=stuff depends on retpoline\n");
1431
retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1432
}
1433
1434
/*
1435
* Let IBRS trump all on Intel without affecting the effects of the
1436
* retbleed= cmdline option except for call depth based stuffing
1437
*/
1438
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
1439
switch (spectre_v2_enabled) {
1440
case SPECTRE_V2_IBRS:
1441
retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
1442
break;
1443
case SPECTRE_V2_EIBRS:
1444
case SPECTRE_V2_EIBRS_RETPOLINE:
1445
case SPECTRE_V2_EIBRS_LFENCE:
1446
retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
1447
break;
1448
default:
1449
if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
1450
pr_err(RETBLEED_INTEL_MSG);
1451
}
1452
}
1453
1454
pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
1455
}
1456
1457
static void __init retbleed_apply_mitigation(void)
1458
{
1459
bool mitigate_smt = false;
1460
1461
switch (retbleed_mitigation) {
1462
case RETBLEED_MITIGATION_NONE:
1463
return;
1464
1465
case RETBLEED_MITIGATION_UNRET:
1466
setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1467
setup_force_cpu_cap(X86_FEATURE_UNRET);
1468
1469
set_return_thunk(retbleed_return_thunk);
1470
1471
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
1472
boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
1473
pr_err(RETBLEED_UNTRAIN_MSG);
1474
1475
mitigate_smt = true;
1476
break;
1477
1478
case RETBLEED_MITIGATION_IBPB:
1479
setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
1480
setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
1481
mitigate_smt = true;
1482
1483
/*
1484
* IBPB on entry already obviates the need for
1485
* software-based untraining so clear those in case some
1486
* other mitigation like SRSO has selected them.
1487
*/
1488
setup_clear_cpu_cap(X86_FEATURE_UNRET);
1489
setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
1490
1491
/*
1492
* There is no need for RSB filling: write_ibpb() ensures
1493
* all predictions, including the RSB, are invalidated,
1494
* regardless of IBPB implementation.
1495
*/
1496
setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
1497
1498
break;
1499
1500
case RETBLEED_MITIGATION_STUFF:
1501
setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1502
setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
1503
1504
set_return_thunk(call_depth_return_thunk);
1505
break;
1506
1507
default:
1508
break;
1509
}
1510
1511
if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
1512
(retbleed_nosmt || smt_mitigations == SMT_MITIGATIONS_ON))
1513
cpu_smt_disable(false);
1514
}
1515
1516
#undef pr_fmt
1517
#define pr_fmt(fmt) "ITS: " fmt
1518
1519
static const char * const its_strings[] = {
1520
[ITS_MITIGATION_OFF] = "Vulnerable",
1521
[ITS_MITIGATION_VMEXIT_ONLY] = "Mitigation: Vulnerable, KVM: Not affected",
1522
[ITS_MITIGATION_ALIGNED_THUNKS] = "Mitigation: Aligned branch/return thunks",
1523
[ITS_MITIGATION_RETPOLINE_STUFF] = "Mitigation: Retpolines, Stuffing RSB",
1524
};
1525
1526
static int __init its_parse_cmdline(char *str)
1527
{
1528
if (!str)
1529
return -EINVAL;
1530
1531
if (!IS_ENABLED(CONFIG_MITIGATION_ITS)) {
1532
pr_err("Mitigation disabled at compile time, ignoring option (%s)", str);
1533
return 0;
1534
}
1535
1536
if (!strcmp(str, "off")) {
1537
its_mitigation = ITS_MITIGATION_OFF;
1538
} else if (!strcmp(str, "on")) {
1539
its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1540
} else if (!strcmp(str, "force")) {
1541
its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1542
setup_force_cpu_bug(X86_BUG_ITS);
1543
} else if (!strcmp(str, "vmexit")) {
1544
its_mitigation = ITS_MITIGATION_VMEXIT_ONLY;
1545
} else if (!strcmp(str, "stuff")) {
1546
its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
1547
} else {
1548
pr_err("Ignoring unknown indirect_target_selection option (%s).", str);
1549
}
1550
1551
return 0;
1552
}
1553
early_param("indirect_target_selection", its_parse_cmdline);
1554
1555
static void __init its_select_mitigation(void)
1556
{
1557
if (!boot_cpu_has_bug(X86_BUG_ITS)) {
1558
its_mitigation = ITS_MITIGATION_OFF;
1559
return;
1560
}
1561
1562
if (its_mitigation == ITS_MITIGATION_AUTO) {
1563
if (should_mitigate_vuln(X86_BUG_ITS))
1564
its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1565
else
1566
its_mitigation = ITS_MITIGATION_OFF;
1567
}
1568
1569
if (its_mitigation == ITS_MITIGATION_OFF)
1570
return;
1571
1572
if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) ||
1573
!IS_ENABLED(CONFIG_MITIGATION_RETHUNK)) {
1574
pr_err("WARNING: ITS mitigation depends on retpoline and rethunk support\n");
1575
its_mitigation = ITS_MITIGATION_OFF;
1576
return;
1577
}
1578
1579
if (IS_ENABLED(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)) {
1580
pr_err("WARNING: ITS mitigation is not compatible with CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B\n");
1581
its_mitigation = ITS_MITIGATION_OFF;
1582
return;
1583
}
1584
1585
if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF &&
1586
!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) {
1587
pr_err("RSB stuff mitigation not supported, using default\n");
1588
its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1589
}
1590
1591
if (its_mitigation == ITS_MITIGATION_VMEXIT_ONLY &&
1592
!boot_cpu_has_bug(X86_BUG_ITS_NATIVE_ONLY))
1593
its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1594
}
1595
1596
static void __init its_update_mitigation(void)
1597
{
1598
if (!boot_cpu_has_bug(X86_BUG_ITS))
1599
return;
1600
1601
switch (spectre_v2_enabled) {
1602
case SPECTRE_V2_NONE:
1603
if (its_mitigation != ITS_MITIGATION_OFF)
1604
pr_err("WARNING: Spectre-v2 mitigation is off, disabling ITS\n");
1605
its_mitigation = ITS_MITIGATION_OFF;
1606
break;
1607
case SPECTRE_V2_RETPOLINE:
1608
case SPECTRE_V2_EIBRS_RETPOLINE:
1609
/* Retpoline+CDT mitigates ITS */
1610
if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF)
1611
its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
1612
break;
1613
case SPECTRE_V2_LFENCE:
1614
case SPECTRE_V2_EIBRS_LFENCE:
1615
pr_err("WARNING: ITS mitigation is not compatible with lfence mitigation\n");
1616
its_mitigation = ITS_MITIGATION_OFF;
1617
break;
1618
default:
1619
break;
1620
}
1621
1622
if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF &&
1623
!cdt_possible(spectre_v2_enabled))
1624
its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1625
1626
pr_info("%s\n", its_strings[its_mitigation]);
1627
}
1628
1629
static void __init its_apply_mitigation(void)
1630
{
1631
switch (its_mitigation) {
1632
case ITS_MITIGATION_OFF:
1633
case ITS_MITIGATION_AUTO:
1634
case ITS_MITIGATION_VMEXIT_ONLY:
1635
break;
1636
case ITS_MITIGATION_ALIGNED_THUNKS:
1637
if (!boot_cpu_has(X86_FEATURE_RETPOLINE))
1638
setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS);
1639
1640
setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1641
set_return_thunk(its_return_thunk);
1642
break;
1643
case ITS_MITIGATION_RETPOLINE_STUFF:
1644
setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1645
setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
1646
set_return_thunk(call_depth_return_thunk);
1647
break;
1648
}
1649
}
1650
1651
#undef pr_fmt
1652
#define pr_fmt(fmt) "Transient Scheduler Attacks: " fmt
1653
1654
enum tsa_mitigations {
1655
TSA_MITIGATION_NONE,
1656
TSA_MITIGATION_AUTO,
1657
TSA_MITIGATION_UCODE_NEEDED,
1658
TSA_MITIGATION_USER_KERNEL,
1659
TSA_MITIGATION_VM,
1660
TSA_MITIGATION_FULL,
1661
};
1662
1663
static const char * const tsa_strings[] = {
1664
[TSA_MITIGATION_NONE] = "Vulnerable",
1665
[TSA_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
1666
[TSA_MITIGATION_USER_KERNEL] = "Mitigation: Clear CPU buffers: user/kernel boundary",
1667
[TSA_MITIGATION_VM] = "Mitigation: Clear CPU buffers: VM",
1668
[TSA_MITIGATION_FULL] = "Mitigation: Clear CPU buffers",
1669
};
1670
1671
static enum tsa_mitigations tsa_mitigation __ro_after_init =
1672
IS_ENABLED(CONFIG_MITIGATION_TSA) ? TSA_MITIGATION_AUTO : TSA_MITIGATION_NONE;
1673
1674
static int __init tsa_parse_cmdline(char *str)
1675
{
1676
if (!str)
1677
return -EINVAL;
1678
1679
if (!strcmp(str, "off"))
1680
tsa_mitigation = TSA_MITIGATION_NONE;
1681
else if (!strcmp(str, "on"))
1682
tsa_mitigation = TSA_MITIGATION_FULL;
1683
else if (!strcmp(str, "user"))
1684
tsa_mitigation = TSA_MITIGATION_USER_KERNEL;
1685
else if (!strcmp(str, "vm"))
1686
tsa_mitigation = TSA_MITIGATION_VM;
1687
else
1688
pr_err("Ignoring unknown tsa=%s option.\n", str);
1689
1690
return 0;
1691
}
1692
early_param("tsa", tsa_parse_cmdline);
1693
1694
static void __init tsa_select_mitigation(void)
1695
{
1696
if (!boot_cpu_has_bug(X86_BUG_TSA)) {
1697
tsa_mitigation = TSA_MITIGATION_NONE;
1698
return;
1699
}
1700
1701
if (tsa_mitigation == TSA_MITIGATION_AUTO) {
1702
bool vm = false, uk = false;
1703
1704
tsa_mitigation = TSA_MITIGATION_NONE;
1705
1706
if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
1707
cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER)) {
1708
tsa_mitigation = TSA_MITIGATION_USER_KERNEL;
1709
uk = true;
1710
}
1711
1712
if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
1713
cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) {
1714
tsa_mitigation = TSA_MITIGATION_VM;
1715
vm = true;
1716
}
1717
1718
if (uk && vm)
1719
tsa_mitigation = TSA_MITIGATION_FULL;
1720
}
1721
1722
if (tsa_mitigation == TSA_MITIGATION_NONE)
1723
return;
1724
1725
if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR))
1726
tsa_mitigation = TSA_MITIGATION_UCODE_NEEDED;
1727
1728
/*
1729
* No need to set verw_clear_cpu_buf_mitigation_selected - it
1730
* doesn't fit all cases here and it is not needed because this
1731
* is the only VERW-based mitigation on AMD.
1732
*/
1733
pr_info("%s\n", tsa_strings[tsa_mitigation]);
1734
}
1735
1736
static void __init tsa_apply_mitigation(void)
1737
{
1738
switch (tsa_mitigation) {
1739
case TSA_MITIGATION_USER_KERNEL:
1740
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
1741
break;
1742
case TSA_MITIGATION_VM:
1743
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
1744
break;
1745
case TSA_MITIGATION_FULL:
1746
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
1747
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
1748
break;
1749
default:
1750
break;
1751
}
1752
}
1753
1754
#undef pr_fmt
1755
#define pr_fmt(fmt) "Spectre V2 : " fmt
1756
1757
static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
1758
SPECTRE_V2_USER_NONE;
1759
static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
1760
SPECTRE_V2_USER_NONE;
1761
1762
#ifdef CONFIG_MITIGATION_RETPOLINE
1763
static bool spectre_v2_bad_module;
1764
1765
bool retpoline_module_ok(bool has_retpoline)
1766
{
1767
if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
1768
return true;
1769
1770
pr_err("System may be vulnerable to spectre v2\n");
1771
spectre_v2_bad_module = true;
1772
return false;
1773
}
1774
1775
static inline const char *spectre_v2_module_string(void)
1776
{
1777
return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
1778
}
1779
#else
1780
static inline const char *spectre_v2_module_string(void) { return ""; }
1781
#endif
1782
1783
#define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
1784
#define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
1785
#define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
1786
#define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"
1787
1788
#ifdef CONFIG_BPF_SYSCALL
1789
void unpriv_ebpf_notify(int new_state)
1790
{
1791
if (new_state)
1792
return;
1793
1794
/* Unprivileged eBPF is enabled */
1795
1796
switch (spectre_v2_enabled) {
1797
case SPECTRE_V2_EIBRS:
1798
pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
1799
break;
1800
case SPECTRE_V2_EIBRS_LFENCE:
1801
if (sched_smt_active())
1802
pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
1803
break;
1804
default:
1805
break;
1806
}
1807
}
1808
#endif
1809
1810
static inline bool match_option(const char *arg, int arglen, const char *opt)
1811
{
1812
int len = strlen(opt);
1813
1814
return len == arglen && !strncmp(arg, opt, len);
1815
}
1816
1817
/* The kernel command line selection for spectre v2 */
1818
enum spectre_v2_mitigation_cmd {
1819
SPECTRE_V2_CMD_NONE,
1820
SPECTRE_V2_CMD_AUTO,
1821
SPECTRE_V2_CMD_FORCE,
1822
SPECTRE_V2_CMD_RETPOLINE,
1823
SPECTRE_V2_CMD_RETPOLINE_GENERIC,
1824
SPECTRE_V2_CMD_RETPOLINE_LFENCE,
1825
SPECTRE_V2_CMD_EIBRS,
1826
SPECTRE_V2_CMD_EIBRS_RETPOLINE,
1827
SPECTRE_V2_CMD_EIBRS_LFENCE,
1828
SPECTRE_V2_CMD_IBRS,
1829
};
1830
1831
static enum spectre_v2_mitigation_cmd spectre_v2_cmd __ro_after_init = SPECTRE_V2_CMD_AUTO;
1832
1833
enum spectre_v2_user_cmd {
1834
SPECTRE_V2_USER_CMD_NONE,
1835
SPECTRE_V2_USER_CMD_AUTO,
1836
SPECTRE_V2_USER_CMD_FORCE,
1837
SPECTRE_V2_USER_CMD_PRCTL,
1838
SPECTRE_V2_USER_CMD_PRCTL_IBPB,
1839
SPECTRE_V2_USER_CMD_SECCOMP,
1840
SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
1841
};
1842
1843
static const char * const spectre_v2_user_strings[] = {
1844
[SPECTRE_V2_USER_NONE] = "User space: Vulnerable",
1845
[SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection",
1846
[SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection",
1847
[SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl",
1848
[SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl",
1849
};
1850
1851
static const struct {
1852
const char *option;
1853
enum spectre_v2_user_cmd cmd;
1854
bool secure;
1855
} v2_user_options[] __initconst = {
1856
{ "auto", SPECTRE_V2_USER_CMD_AUTO, false },
1857
{ "off", SPECTRE_V2_USER_CMD_NONE, false },
1858
{ "on", SPECTRE_V2_USER_CMD_FORCE, true },
1859
{ "prctl", SPECTRE_V2_USER_CMD_PRCTL, false },
1860
{ "prctl,ibpb", SPECTRE_V2_USER_CMD_PRCTL_IBPB, false },
1861
{ "seccomp", SPECTRE_V2_USER_CMD_SECCOMP, false },
1862
{ "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false },
1863
};
1864
1865
static void __init spec_v2_user_print_cond(const char *reason, bool secure)
1866
{
1867
if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
1868
pr_info("spectre_v2_user=%s forced on command line.\n", reason);
1869
}
1870
1871
static enum spectre_v2_user_cmd __init spectre_v2_parse_user_cmdline(void)
1872
{
1873
char arg[20];
1874
int ret, i;
1875
1876
if (!IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2))
1877
return SPECTRE_V2_USER_CMD_NONE;
1878
1879
ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
1880
arg, sizeof(arg));
1881
if (ret < 0)
1882
return SPECTRE_V2_USER_CMD_AUTO;
1883
1884
for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
1885
if (match_option(arg, ret, v2_user_options[i].option)) {
1886
spec_v2_user_print_cond(v2_user_options[i].option,
1887
v2_user_options[i].secure);
1888
return v2_user_options[i].cmd;
1889
}
1890
}
1891
1892
pr_err("Unknown user space protection option (%s). Switching to default\n", arg);
1893
return SPECTRE_V2_USER_CMD_AUTO;
1894
}
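/*
* Example (illustrative): booting with "spectre_v2_user=seccomp,ibpb"
* matches the "seccomp,ibpb" entry above and returns
* SPECTRE_V2_USER_CMD_SECCOMP_IBPB; spectre_v2_user_select_mitigation()
* then sets spectre_v2_user_ibpb to SPECTRE_V2_USER_STRICT and
* spectre_v2_user_stibp to SPECTRE_V2_USER_SECCOMP (or to
* SPECTRE_V2_USER_PRCTL when CONFIG_SECCOMP is not enabled).
*/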
1895
1896
static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
1897
{
1898
return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS;
1899
}
1900
1901
static void __init spectre_v2_user_select_mitigation(void)
1902
{
1903
if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
1904
return;
1905
1906
switch (spectre_v2_parse_user_cmdline()) {
1907
case SPECTRE_V2_USER_CMD_NONE:
1908
return;
1909
case SPECTRE_V2_USER_CMD_FORCE:
1910
spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1911
spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT;
1912
break;
1913
case SPECTRE_V2_USER_CMD_AUTO:
1914
if (!should_mitigate_vuln(X86_BUG_SPECTRE_V2_USER))
1915
break;
1916
spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
1917
if (smt_mitigations == SMT_MITIGATIONS_OFF)
1918
break;
1919
spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1920
break;
1921
case SPECTRE_V2_USER_CMD_PRCTL:
1922
spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
1923
spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1924
break;
1925
case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
1926
spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1927
spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1928
break;
1929
case SPECTRE_V2_USER_CMD_SECCOMP:
1930
if (IS_ENABLED(CONFIG_SECCOMP))
1931
spectre_v2_user_ibpb = SPECTRE_V2_USER_SECCOMP;
1932
else
1933
spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
1934
spectre_v2_user_stibp = spectre_v2_user_ibpb;
1935
break;
1936
case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
1937
spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1938
if (IS_ENABLED(CONFIG_SECCOMP))
1939
spectre_v2_user_stibp = SPECTRE_V2_USER_SECCOMP;
1940
else
1941
spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1942
break;
1943
}
1944
1945
/*
1946
* At this point, an STIBP mode other than "off" has been set.
1947
* If STIBP support is not being forced, check if STIBP always-on
1948
* is preferred.
1949
*/
1950
if ((spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
1951
spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) &&
1952
boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
1953
spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED;
1954
1955
if (!boot_cpu_has(X86_FEATURE_IBPB))
1956
spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE;
1957
1958
if (!boot_cpu_has(X86_FEATURE_STIBP))
1959
spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
1960
}
1961
1962
static void __init spectre_v2_user_update_mitigation(void)
1963
{
1964
if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
1965
return;
1966
1967
/* The spectre_v2 cmd line can override spectre_v2_user options */
1968
if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE) {
1969
spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE;
1970
spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
1971
} else if (spectre_v2_cmd == SPECTRE_V2_CMD_FORCE) {
1972
spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1973
spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT;
1974
}
1975
1976
/*
* If there is no STIBP support, if Intel Enhanced IBRS is enabled, or if
* SMT is not possible, STIBP is not required.
*
* Intel's Enhanced IBRS also protects against cross-thread branch target
* injection in user mode, as the IBRS bit remains always set, which
* implicitly enables cross-thread protections. However, in legacy IBRS
* mode, the IBRS bit is set only on kernel entry and cleared on return
* to userspace. AMD Automatic IBRS also does not protect userspace.
* These modes therefore disable the implicit cross-thread protection,
* so allow STIBP to be selected in those cases.
*/
1988
if (!boot_cpu_has(X86_FEATURE_STIBP) ||
1989
!cpu_smt_possible() ||
1990
(spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
1991
!boot_cpu_has(X86_FEATURE_AUTOIBRS))) {
1992
spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
1993
return;
1994
}
1995
1996
if (spectre_v2_user_stibp != SPECTRE_V2_USER_NONE &&
1997
(retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
1998
retbleed_mitigation == RETBLEED_MITIGATION_IBPB)) {
1999
if (spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT &&
2000
spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT_PREFERRED)
2001
pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n");
2002
spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED;
2003
}
2004
pr_info("%s\n", spectre_v2_user_strings[spectre_v2_user_stibp]);
2005
}
2006
2007
static void __init spectre_v2_user_apply_mitigation(void)
2008
{
2009
/* Initialize Indirect Branch Prediction Barrier */
2010
if (spectre_v2_user_ibpb != SPECTRE_V2_USER_NONE) {
2011
static_branch_enable(&switch_vcpu_ibpb);
2012
2013
switch (spectre_v2_user_ibpb) {
2014
case SPECTRE_V2_USER_STRICT:
2015
static_branch_enable(&switch_mm_always_ibpb);
2016
break;
2017
case SPECTRE_V2_USER_PRCTL:
2018
case SPECTRE_V2_USER_SECCOMP:
2019
static_branch_enable(&switch_mm_cond_ibpb);
2020
break;
2021
default:
2022
break;
2023
}
2024
2025
pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
2026
static_key_enabled(&switch_mm_always_ibpb) ?
2027
"always-on" : "conditional");
2028
}
2029
}
2030
2031
static const char * const spectre_v2_strings[] = {
2032
[SPECTRE_V2_NONE] = "Vulnerable",
2033
[SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines",
2034
[SPECTRE_V2_LFENCE] = "Mitigation: LFENCE",
2035
[SPECTRE_V2_EIBRS] = "Mitigation: Enhanced / Automatic IBRS",
2036
[SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced / Automatic IBRS + LFENCE",
2037
[SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced / Automatic IBRS + Retpolines",
2038
[SPECTRE_V2_IBRS] = "Mitigation: IBRS",
2039
};
2040
2041
static const struct {
2042
const char *option;
2043
enum spectre_v2_mitigation_cmd cmd;
2044
bool secure;
2045
} mitigation_options[] __initconst = {
2046
{ "off", SPECTRE_V2_CMD_NONE, false },
2047
{ "on", SPECTRE_V2_CMD_FORCE, true },
2048
{ "retpoline", SPECTRE_V2_CMD_RETPOLINE, false },
2049
{ "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false },
2050
{ "retpoline,lfence", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false },
2051
{ "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
2052
{ "eibrs", SPECTRE_V2_CMD_EIBRS, false },
2053
{ "eibrs,lfence", SPECTRE_V2_CMD_EIBRS_LFENCE, false },
2054
{ "eibrs,retpoline", SPECTRE_V2_CMD_EIBRS_RETPOLINE, false },
2055
{ "auto", SPECTRE_V2_CMD_AUTO, false },
2056
{ "ibrs", SPECTRE_V2_CMD_IBRS, false },
2057
};
2058
2059
static void __init spec_v2_print_cond(const char *reason, bool secure)
2060
{
2061
if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
2062
pr_info("%s selected on command line.\n", reason);
2063
}
2064
2065
static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
2066
{
2067
enum spectre_v2_mitigation_cmd cmd;
2068
char arg[20];
2069
int ret, i;
2070
2071
cmd = IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ? SPECTRE_V2_CMD_AUTO : SPECTRE_V2_CMD_NONE;
2072
if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
2073
return SPECTRE_V2_CMD_NONE;
2074
2075
ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
2076
if (ret < 0)
2077
return cmd;
2078
2079
for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
2080
if (!match_option(arg, ret, mitigation_options[i].option))
2081
continue;
2082
cmd = mitigation_options[i].cmd;
2083
break;
2084
}
2085
2086
if (i >= ARRAY_SIZE(mitigation_options)) {
2087
pr_err("unknown option (%s). Switching to default mode\n", arg);
2088
return cmd;
2089
}
2090
2091
if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
2092
cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
2093
cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
2094
cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
2095
cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
2096
!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) {
2097
pr_err("%s selected but not compiled in. Switching to AUTO select\n",
2098
mitigation_options[i].option);
2099
return SPECTRE_V2_CMD_AUTO;
2100
}
2101
2102
if ((cmd == SPECTRE_V2_CMD_EIBRS ||
2103
cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
2104
cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
2105
!boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
2106
pr_err("%s selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n",
2107
mitigation_options[i].option);
2108
return SPECTRE_V2_CMD_AUTO;
2109
}
2110
2111
if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
2112
cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
2113
!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
2114
pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n",
2115
mitigation_options[i].option);
2116
return SPECTRE_V2_CMD_AUTO;
2117
}
2118
2119
if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY)) {
2120
pr_err("%s selected but not compiled in. Switching to AUTO select\n",
2121
mitigation_options[i].option);
2122
return SPECTRE_V2_CMD_AUTO;
2123
}
2124
2125
if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
2126
pr_err("%s selected but not Intel CPU. Switching to AUTO select\n",
2127
mitigation_options[i].option);
2128
return SPECTRE_V2_CMD_AUTO;
2129
}
2130
2131
if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) {
2132
pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n",
2133
mitigation_options[i].option);
2134
return SPECTRE_V2_CMD_AUTO;
2135
}
2136
2137
if (cmd == SPECTRE_V2_CMD_IBRS && cpu_feature_enabled(X86_FEATURE_XENPV)) {
2138
pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n",
2139
mitigation_options[i].option);
2140
return SPECTRE_V2_CMD_AUTO;
2141
}
2142
2143
spec_v2_print_cond(mitigation_options[i].option,
2144
mitigation_options[i].secure);
2145
return cmd;
2146
}
2147
2148
static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
2149
{
2150
if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) {
2151
pr_err("Kernel not compiled with retpoline; no mitigation available!");
2152
return SPECTRE_V2_NONE;
2153
}
2154
2155
return SPECTRE_V2_RETPOLINE;
2156
}
2157
2158
static bool __ro_after_init rrsba_disabled;
2159
2160
/* Disable in-kernel use of non-RSB RET predictors */
2161
static void __init spec_ctrl_disable_kernel_rrsba(void)
2162
{
2163
if (rrsba_disabled)
2164
return;
2165
2166
if (!(x86_arch_cap_msr & ARCH_CAP_RRSBA)) {
2167
rrsba_disabled = true;
2168
return;
2169
}
2170
2171
if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
2172
return;
2173
2174
x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
2175
update_spec_ctrl(x86_spec_ctrl_base);
2176
rrsba_disabled = true;
2177
}
2178
2179
static void __init spectre_v2_select_rsb_mitigation(enum spectre_v2_mitigation mode)
2180
{
2181
/*
* WARNING! There are many subtleties to consider when changing *any*
* code related to RSB mitigations. Before doing so, carefully read
* the following document, and update it if necessary:
*
* Documentation/admin-guide/hw-vuln/rsb.rst
*
2188
* In an overly simplified nutshell:
2189
*
2190
* - User->user RSB attacks are conditionally mitigated during
2191
* context switches by cond_mitigation -> write_ibpb().
2192
*
2193
* - User->kernel and guest->host attacks are mitigated by eIBRS or
2194
* RSB filling.
2195
*
2196
* Note that, depending on the config, other alternative mitigations
* may end up being used instead, e.g., IBPB on entry/vmexit, call
* depth tracking, or return thunks.
*/
2200
2201
switch (mode) {
2202
case SPECTRE_V2_NONE:
2203
break;
2204
2205
case SPECTRE_V2_EIBRS:
2206
case SPECTRE_V2_EIBRS_LFENCE:
2207
case SPECTRE_V2_EIBRS_RETPOLINE:
2208
if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
2209
pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
2210
setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
2211
}
2212
break;
2213
2214
case SPECTRE_V2_RETPOLINE:
2215
case SPECTRE_V2_LFENCE:
2216
case SPECTRE_V2_IBRS:
2217
pr_info("Spectre v2 / SpectreRSB: Filling RSB on context switch and VMEXIT\n");
2218
setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
2219
setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
2220
break;
2221
2222
default:
2223
pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation\n");
2224
dump_stack();
2225
break;
2226
}
2227
}
2228
2229
/*
* Set BHI_DIS_S to prevent indirect branches in the kernel from being
* influenced by branch history in userspace. Not needed if BHI_NO is set.
*/
2233
static bool __init spec_ctrl_bhi_dis(void)
2234
{
2235
if (!boot_cpu_has(X86_FEATURE_BHI_CTRL))
2236
return false;
2237
2238
x86_spec_ctrl_base |= SPEC_CTRL_BHI_DIS_S;
2239
update_spec_ctrl(x86_spec_ctrl_base);
2240
setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_HW);
2241
2242
return true;
2243
}
2244
2245
enum bhi_mitigations {
2246
BHI_MITIGATION_OFF,
2247
BHI_MITIGATION_AUTO,
2248
BHI_MITIGATION_ON,
2249
BHI_MITIGATION_VMEXIT_ONLY,
2250
};
2251
2252
static enum bhi_mitigations bhi_mitigation __ro_after_init =
2253
IS_ENABLED(CONFIG_MITIGATION_SPECTRE_BHI) ? BHI_MITIGATION_AUTO : BHI_MITIGATION_OFF;
2254
2255
static int __init spectre_bhi_parse_cmdline(char *str)
2256
{
2257
if (!str)
2258
return -EINVAL;
2259
2260
if (!strcmp(str, "off"))
2261
bhi_mitigation = BHI_MITIGATION_OFF;
2262
else if (!strcmp(str, "on"))
2263
bhi_mitigation = BHI_MITIGATION_ON;
2264
else if (!strcmp(str, "vmexit"))
2265
bhi_mitigation = BHI_MITIGATION_VMEXIT_ONLY;
2266
else
2267
pr_err("Ignoring unknown spectre_bhi option (%s)", str);
2268
2269
return 0;
2270
}
2271
early_param("spectre_bhi", spectre_bhi_parse_cmdline);
2272
2273
static void __init bhi_select_mitigation(void)
2274
{
2275
if (!boot_cpu_has(X86_BUG_BHI))
2276
bhi_mitigation = BHI_MITIGATION_OFF;
2277
2278
if (bhi_mitigation != BHI_MITIGATION_AUTO)
2279
return;
2280
2281
if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST)) {
2282
if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL))
2283
bhi_mitigation = BHI_MITIGATION_ON;
2284
else
2285
bhi_mitigation = BHI_MITIGATION_VMEXIT_ONLY;
2286
} else {
2287
bhi_mitigation = BHI_MITIGATION_OFF;
2288
}
2289
}
2290
2291
static void __init bhi_update_mitigation(void)
2292
{
2293
if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE)
2294
bhi_mitigation = BHI_MITIGATION_OFF;
2295
2296
if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
2297
spectre_v2_cmd == SPECTRE_V2_CMD_AUTO)
2298
bhi_mitigation = BHI_MITIGATION_OFF;
2299
}
2300
2301
static void __init bhi_apply_mitigation(void)
2302
{
2303
if (bhi_mitigation == BHI_MITIGATION_OFF)
2304
return;
2305
2306
/* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */
2307
if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
2308
!boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) {
2309
spec_ctrl_disable_kernel_rrsba();
2310
if (rrsba_disabled)
2311
return;
2312
}
2313
2314
if (!IS_ENABLED(CONFIG_X86_64))
2315
return;
2316
2317
/* Mitigate in hardware if supported */
2318
if (spec_ctrl_bhi_dis())
2319
return;
2320
2321
if (bhi_mitigation == BHI_MITIGATION_VMEXIT_ONLY) {
2322
pr_info("Spectre BHI mitigation: SW BHB clearing on VM exit only\n");
2323
setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT);
2324
return;
2325
}
2326
2327
pr_info("Spectre BHI mitigation: SW BHB clearing on syscall and VM exit\n");
2328
setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP);
2329
setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT);
2330
}
2331
2332
static void __init spectre_v2_select_mitigation(void)
2333
{
2334
spectre_v2_cmd = spectre_v2_parse_cmdline();
2335
2336
if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
2337
(spectre_v2_cmd == SPECTRE_V2_CMD_NONE || spectre_v2_cmd == SPECTRE_V2_CMD_AUTO))
2338
return;
2339
2340
switch (spectre_v2_cmd) {
2341
case SPECTRE_V2_CMD_NONE:
2342
return;
2343
2344
case SPECTRE_V2_CMD_AUTO:
2345
if (!should_mitigate_vuln(X86_BUG_SPECTRE_V2))
2346
break;
2347
fallthrough;
2348
case SPECTRE_V2_CMD_FORCE:
2349
if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
2350
spectre_v2_enabled = SPECTRE_V2_EIBRS;
2351
break;
2352
}
2353
2354
spectre_v2_enabled = spectre_v2_select_retpoline();
2355
break;
2356
2357
case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
2358
pr_err(SPECTRE_V2_LFENCE_MSG);
2359
spectre_v2_enabled = SPECTRE_V2_LFENCE;
2360
break;
2361
2362
case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
2363
spectre_v2_enabled = SPECTRE_V2_RETPOLINE;
2364
break;
2365
2366
case SPECTRE_V2_CMD_RETPOLINE:
2367
spectre_v2_enabled = spectre_v2_select_retpoline();
2368
break;
2369
2370
case SPECTRE_V2_CMD_IBRS:
2371
spectre_v2_enabled = SPECTRE_V2_IBRS;
2372
break;
2373
2374
case SPECTRE_V2_CMD_EIBRS:
2375
spectre_v2_enabled = SPECTRE_V2_EIBRS;
2376
break;
2377
2378
case SPECTRE_V2_CMD_EIBRS_LFENCE:
2379
spectre_v2_enabled = SPECTRE_V2_EIBRS_LFENCE;
2380
break;
2381
2382
case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
2383
spectre_v2_enabled = SPECTRE_V2_EIBRS_RETPOLINE;
2384
break;
2385
}
2386
}
2387
2388
static void __init spectre_v2_update_mitigation(void)
2389
{
2390
if (spectre_v2_cmd == SPECTRE_V2_CMD_AUTO &&
2391
!spectre_v2_in_eibrs_mode(spectre_v2_enabled)) {
2392
if (IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY) &&
2393
boot_cpu_has_bug(X86_BUG_RETBLEED) &&
2394
retbleed_mitigation != RETBLEED_MITIGATION_NONE &&
2395
retbleed_mitigation != RETBLEED_MITIGATION_STUFF &&
2396
boot_cpu_has(X86_FEATURE_IBRS) &&
2397
boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
2398
spectre_v2_enabled = SPECTRE_V2_IBRS;
2399
}
2400
}
2401
2402
if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
2403
pr_info("%s\n", spectre_v2_strings[spectre_v2_enabled]);
2404
}
2405
2406
static void __init spectre_v2_apply_mitigation(void)
2407
{
2408
if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
2409
pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
2410
2411
if (spectre_v2_in_ibrs_mode(spectre_v2_enabled)) {
2412
if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) {
2413
msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);
2414
} else {
2415
x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
2416
update_spec_ctrl(x86_spec_ctrl_base);
2417
}
2418
}
2419
2420
switch (spectre_v2_enabled) {
2421
case SPECTRE_V2_NONE:
2422
return;
2423
2424
case SPECTRE_V2_EIBRS:
2425
break;
2426
2427
case SPECTRE_V2_IBRS:
2428
setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
2429
if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
2430
pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
2431
break;
2432
2433
case SPECTRE_V2_LFENCE:
2434
case SPECTRE_V2_EIBRS_LFENCE:
2435
setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
2436
fallthrough;
2437
2438
case SPECTRE_V2_RETPOLINE:
2439
case SPECTRE_V2_EIBRS_RETPOLINE:
2440
setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
2441
break;
2442
}
2443
2444
/*
* Disable alternate RSB predictions in the kernel when indirect CALLs and
* JMPs get protection against BHI and Intramode-BTI, but RET prediction
* from a non-RSB predictor is still a risk.
*/
2449
if (spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE ||
2450
spectre_v2_enabled == SPECTRE_V2_EIBRS_RETPOLINE ||
2451
spectre_v2_enabled == SPECTRE_V2_RETPOLINE)
2452
spec_ctrl_disable_kernel_rrsba();
2453
2454
spectre_v2_select_rsb_mitigation(spectre_v2_enabled);
2455
2456
/*
2457
* Retpoline protects the kernel, but doesn't protect firmware. IBRS
2458
* and Enhanced IBRS protect firmware too, so enable IBRS around
2459
* firmware calls only when IBRS / Enhanced / Automatic IBRS aren't
2460
* otherwise enabled.
2461
*
2462
* Use "spectre_v2_enabled" to check Enhanced IBRS instead of
2463
* boot_cpu_has(), because the user might select retpoline on the kernel
2464
* command line and if the CPU supports Enhanced IBRS, kernel might
2465
* un-intentionally not enable IBRS around firmware calls.
2466
*/
2467
if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
2468
boot_cpu_has(X86_FEATURE_IBPB) &&
2469
(boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
2470
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {
2471
2472
if (retbleed_mitigation != RETBLEED_MITIGATION_IBPB) {
2473
setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
2474
pr_info("Enabling Speculation Barrier for firmware calls\n");
2475
}
2476
2477
} else if (boot_cpu_has(X86_FEATURE_IBRS) &&
2478
!spectre_v2_in_ibrs_mode(spectre_v2_enabled)) {
2479
setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
2480
pr_info("Enabling Restricted Speculation for firmware calls\n");
2481
}
2482
}
2483
2484
static void update_stibp_msr(void * __unused)
2485
{
2486
u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
2487
update_spec_ctrl(val);
2488
}
2489
2490
/* Update x86_spec_ctrl_base in case SMT state changed. */
2491
static void update_stibp_strict(void)
2492
{
2493
u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
2494
2495
if (sched_smt_active())
2496
mask |= SPEC_CTRL_STIBP;
2497
2498
if (mask == x86_spec_ctrl_base)
2499
return;
2500
2501
pr_info("Update user space SMT mitigation: STIBP %s\n",
2502
mask & SPEC_CTRL_STIBP ? "always-on" : "off");
2503
x86_spec_ctrl_base = mask;
2504
on_each_cpu(update_stibp_msr, NULL, 1);
2505
}
2506
2507
/* Update the static key controlling the evaluation of TIF_SPEC_IB */
2508
static void update_indir_branch_cond(void)
2509
{
2510
if (sched_smt_active())
2511
static_branch_enable(&switch_to_cond_stibp);
2512
else
2513
static_branch_disable(&switch_to_cond_stibp);
2514
}
2515
2516
#undef pr_fmt
2517
#define pr_fmt(fmt) fmt
2518
2519
/* Update the static key controlling the MDS CPU buffer clear in idle */
2520
static void update_mds_branch_idle(void)
2521
{
2522
/*
2523
* Enable the idle clearing if SMT is active on CPUs which are
2524
* affected only by MSBDS and not any other MDS variant.
2525
*
2526
* The other variants cannot be mitigated when SMT is enabled, so
2527
* clearing the buffers on idle just to prevent the Store Buffer
2528
* repartitioning leak would be a window dressing exercise.
2529
*/
2530
if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
2531
return;
2532
2533
if (sched_smt_active()) {
2534
static_branch_enable(&cpu_buf_idle_clear);
2535
} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
2536
(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
2537
static_branch_disable(&cpu_buf_idle_clear);
2538
}
2539
}
2540
2541
#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
2542
#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
2543
#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
2544
2545
void cpu_bugs_smt_update(void)
2546
{
2547
mutex_lock(&spec_ctrl_mutex);
2548
2549
if (sched_smt_active() && unprivileged_ebpf_enabled() &&
2550
spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
2551
pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
2552
2553
switch (spectre_v2_user_stibp) {
2554
case SPECTRE_V2_USER_NONE:
2555
break;
2556
case SPECTRE_V2_USER_STRICT:
2557
case SPECTRE_V2_USER_STRICT_PREFERRED:
2558
update_stibp_strict();
2559
break;
2560
case SPECTRE_V2_USER_PRCTL:
2561
case SPECTRE_V2_USER_SECCOMP:
2562
update_indir_branch_cond();
2563
break;
2564
}
2565
2566
switch (mds_mitigation) {
2567
case MDS_MITIGATION_FULL:
2568
case MDS_MITIGATION_AUTO:
2569
case MDS_MITIGATION_VMWERV:
2570
if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
2571
pr_warn_once(MDS_MSG_SMT);
2572
update_mds_branch_idle();
2573
break;
2574
case MDS_MITIGATION_OFF:
2575
break;
2576
}
2577
2578
switch (taa_mitigation) {
2579
case TAA_MITIGATION_VERW:
2580
case TAA_MITIGATION_AUTO:
2581
case TAA_MITIGATION_UCODE_NEEDED:
2582
if (sched_smt_active())
2583
pr_warn_once(TAA_MSG_SMT);
2584
break;
2585
case TAA_MITIGATION_TSX_DISABLED:
2586
case TAA_MITIGATION_OFF:
2587
break;
2588
}
2589
2590
switch (mmio_mitigation) {
2591
case MMIO_MITIGATION_VERW:
2592
case MMIO_MITIGATION_AUTO:
2593
case MMIO_MITIGATION_UCODE_NEEDED:
2594
if (sched_smt_active())
2595
pr_warn_once(MMIO_MSG_SMT);
2596
break;
2597
case MMIO_MITIGATION_OFF:
2598
break;
2599
}
2600
2601
switch (tsa_mitigation) {
2602
case TSA_MITIGATION_USER_KERNEL:
2603
case TSA_MITIGATION_VM:
2604
case TSA_MITIGATION_AUTO:
2605
case TSA_MITIGATION_FULL:
2606
/*
* TSA-SQ can potentially lead to information leakage between
* SMT threads.
*/
2610
if (sched_smt_active())
2611
static_branch_enable(&cpu_buf_idle_clear);
2612
else
2613
static_branch_disable(&cpu_buf_idle_clear);
2614
break;
2615
case TSA_MITIGATION_NONE:
2616
case TSA_MITIGATION_UCODE_NEEDED:
2617
break;
2618
}
2619
2620
mutex_unlock(&spec_ctrl_mutex);
2621
}
2622
2623
#undef pr_fmt
2624
#define pr_fmt(fmt) "Speculative Store Bypass: " fmt
2625
2626
static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;
2627
2628
/* The kernel command line selection */
2629
enum ssb_mitigation_cmd {
2630
SPEC_STORE_BYPASS_CMD_NONE,
2631
SPEC_STORE_BYPASS_CMD_AUTO,
2632
SPEC_STORE_BYPASS_CMD_ON,
2633
SPEC_STORE_BYPASS_CMD_PRCTL,
2634
SPEC_STORE_BYPASS_CMD_SECCOMP,
2635
};
2636
2637
static const char * const ssb_strings[] = {
2638
[SPEC_STORE_BYPASS_NONE] = "Vulnerable",
2639
[SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
2640
[SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl",
2641
[SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
2642
};
2643
2644
static const struct {
2645
const char *option;
2646
enum ssb_mitigation_cmd cmd;
2647
} ssb_mitigation_options[] __initconst = {
2648
{ "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
2649
{ "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
2650
{ "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
2651
{ "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
2652
{ "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
2653
};
2654
2655
static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
2656
{
2657
enum ssb_mitigation_cmd cmd;
2658
char arg[20];
2659
int ret, i;
2660
2661
cmd = IS_ENABLED(CONFIG_MITIGATION_SSB) ?
2662
SPEC_STORE_BYPASS_CMD_AUTO : SPEC_STORE_BYPASS_CMD_NONE;
2663
if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
2664
cpu_mitigations_off()) {
2665
return SPEC_STORE_BYPASS_CMD_NONE;
2666
} else {
2667
ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
2668
arg, sizeof(arg));
2669
if (ret < 0)
2670
return cmd;
2671
2672
for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
2673
if (!match_option(arg, ret, ssb_mitigation_options[i].option))
2674
continue;
2675
2676
cmd = ssb_mitigation_options[i].cmd;
2677
break;
2678
}
2679
2680
if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
2681
pr_err("unknown option (%s). Switching to default mode\n", arg);
2682
return cmd;
2683
}
2684
}
2685
2686
return cmd;
2687
}
2688
2689
static void __init ssb_select_mitigation(void)
2690
{
2691
enum ssb_mitigation_cmd cmd;
2692
2693
if (!boot_cpu_has(X86_FEATURE_SSBD))
2694
goto out;
2695
2696
cmd = ssb_parse_cmdline();
2697
if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
2698
(cmd == SPEC_STORE_BYPASS_CMD_NONE ||
2699
cmd == SPEC_STORE_BYPASS_CMD_AUTO))
2700
return;
2701
2702
switch (cmd) {
2703
case SPEC_STORE_BYPASS_CMD_SECCOMP:
2704
/*
2705
* Choose prctl+seccomp as the default mode if seccomp is
2706
* enabled.
2707
*/
2708
if (IS_ENABLED(CONFIG_SECCOMP))
2709
ssb_mode = SPEC_STORE_BYPASS_SECCOMP;
2710
else
2711
ssb_mode = SPEC_STORE_BYPASS_PRCTL;
2712
break;
2713
case SPEC_STORE_BYPASS_CMD_ON:
2714
ssb_mode = SPEC_STORE_BYPASS_DISABLE;
2715
break;
2716
case SPEC_STORE_BYPASS_CMD_AUTO:
2717
if (should_mitigate_vuln(X86_BUG_SPEC_STORE_BYPASS))
2718
ssb_mode = SPEC_STORE_BYPASS_PRCTL;
2719
else
2720
ssb_mode = SPEC_STORE_BYPASS_NONE;
2721
break;
2722
case SPEC_STORE_BYPASS_CMD_PRCTL:
2723
ssb_mode = SPEC_STORE_BYPASS_PRCTL;
2724
break;
2725
case SPEC_STORE_BYPASS_CMD_NONE:
2726
break;
2727
}
2728
2729
out:
2730
if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
2731
pr_info("%s\n", ssb_strings[ssb_mode]);
2732
}
2733
2734
static void __init ssb_apply_mitigation(void)
2735
{
2736
/*
2737
* We have three CPU feature flags that are in play here:
2738
* - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
2739
* - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
2740
* - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
2741
*/
2742
if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) {
2743
setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
2744
/*
2745
* Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
2746
* use a completely different MSR and bit dependent on family.
2747
*/
2748
if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
2749
!static_cpu_has(X86_FEATURE_AMD_SSBD)) {
2750
x86_amd_ssb_disable();
2751
} else {
2752
x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
2753
update_spec_ctrl(x86_spec_ctrl_base);
2754
}
2755
}
2756
}
2757
2758
#undef pr_fmt
2759
#define pr_fmt(fmt) "Speculation prctl: " fmt
2760
2761
static void task_update_spec_tif(struct task_struct *tsk)
2762
{
2763
/* Force the update of the real TIF bits */
2764
set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
2765
2766
/*
2767
* Immediately update the speculation control MSRs for the current
2768
* task, but for a non-current task delay setting the CPU
2769
* mitigation until it is scheduled next.
2770
*
2771
* This can only happen for SECCOMP mitigation. For PRCTL it's
2772
* always the current task.
2773
*/
2774
if (tsk == current)
2775
speculation_ctrl_update_current();
2776
}
2777
2778
static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl)
2779
{
2780
2781
if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
2782
return -EPERM;
2783
2784
switch (ctrl) {
2785
case PR_SPEC_ENABLE:
2786
set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
2787
return 0;
2788
case PR_SPEC_DISABLE:
2789
clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
2790
return 0;
2791
default:
2792
return -ERANGE;
2793
}
2794
}
2795
2796
static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
2797
{
2798
if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
2799
ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
2800
return -ENXIO;
2801
2802
switch (ctrl) {
2803
case PR_SPEC_ENABLE:
2804
/* If speculation is force disabled, enable is not allowed */
2805
if (task_spec_ssb_force_disable(task))
2806
return -EPERM;
2807
task_clear_spec_ssb_disable(task);
2808
task_clear_spec_ssb_noexec(task);
2809
task_update_spec_tif(task);
2810
break;
2811
case PR_SPEC_DISABLE:
2812
task_set_spec_ssb_disable(task);
2813
task_clear_spec_ssb_noexec(task);
2814
task_update_spec_tif(task);
2815
break;
2816
case PR_SPEC_FORCE_DISABLE:
2817
task_set_spec_ssb_disable(task);
2818
task_set_spec_ssb_force_disable(task);
2819
task_clear_spec_ssb_noexec(task);
2820
task_update_spec_tif(task);
2821
break;
2822
case PR_SPEC_DISABLE_NOEXEC:
2823
if (task_spec_ssb_force_disable(task))
2824
return -EPERM;
2825
task_set_spec_ssb_disable(task);
2826
task_set_spec_ssb_noexec(task);
2827
task_update_spec_tif(task);
2828
break;
2829
default:
2830
return -ERANGE;
2831
}
2832
return 0;
2833
}
2834
2835
static bool is_spec_ib_user_controlled(void)
2836
{
2837
return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
2838
spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
2839
spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
2840
spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
2841
}
2842
2843
static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
2844
{
2845
switch (ctrl) {
2846
case PR_SPEC_ENABLE:
2847
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2848
spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2849
return 0;
2850
2851
/*
* With strict mode for both IBPB and STIBP, the instruction code
* paths avoid checking this task flag and instead unconditionally
* run the instruction. However, STIBP and IBPB are independent and
* either can be set to conditional mode regardless of the mode of
* the other.
*
2858
* If either is set to conditional, allow the task flag to be
2859
* updated, unless it was force-disabled by a previous prctl
2860
* call. Currently, this is possible on an AMD CPU which has the
2861
* feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
2862
* kernel is booted with 'spectre_v2_user=seccomp', then
2863
* spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
2864
* spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
2865
*/
2866
if (!is_spec_ib_user_controlled() ||
2867
task_spec_ib_force_disable(task))
2868
return -EPERM;
2869
2870
task_clear_spec_ib_disable(task);
2871
task_update_spec_tif(task);
2872
break;
2873
case PR_SPEC_DISABLE:
2874
case PR_SPEC_FORCE_DISABLE:
2875
/*
2876
* Indirect branch speculation is always allowed when
2877
* mitigation is force disabled.
2878
*/
2879
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2880
spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2881
return -EPERM;
2882
2883
if (!is_spec_ib_user_controlled())
2884
return 0;
2885
2886
task_set_spec_ib_disable(task);
2887
if (ctrl == PR_SPEC_FORCE_DISABLE)
2888
task_set_spec_ib_force_disable(task);
2889
task_update_spec_tif(task);
2890
if (task == current)
2891
indirect_branch_prediction_barrier();
2892
break;
2893
default:
2894
return -ERANGE;
2895
}
2896
return 0;
2897
}
2898
2899
int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
2900
unsigned long ctrl)
2901
{
2902
switch (which) {
2903
case PR_SPEC_STORE_BYPASS:
2904
return ssb_prctl_set(task, ctrl);
2905
case PR_SPEC_INDIRECT_BRANCH:
2906
return ib_prctl_set(task, ctrl);
2907
case PR_SPEC_L1D_FLUSH:
2908
return l1d_flush_prctl_set(task, ctrl);
2909
default:
2910
return -ENODEV;
2911
}
2912
}
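/*
* The prctl()s handled above are the user-facing half of this interface.
* A minimal userspace sketch (assuming the standard PR_* constants from
* <linux/prctl.h>; built and run outside the kernel) that disables
* Speculative Store Bypass for the calling task and reads the state back:
*
*	#include <stdio.h>
*	#include <sys/prctl.h>
*	#include <linux/prctl.h>
*
*	int main(void)
*	{
*		// Fails with ENXIO unless ssb_mode is PRCTL or SECCOMP.
*		if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
*			  PR_SPEC_DISABLE, 0, 0))
*			perror("PR_SET_SPECULATION_CTRL");
*
*		// Returns a bitmask, e.g. PR_SPEC_PRCTL | PR_SPEC_DISABLE.
*		printf("state: 0x%x\n",
*		       prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
*			     0, 0, 0));
*		return 0;
*	}
*/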
2913
2914
#ifdef CONFIG_SECCOMP
2915
void arch_seccomp_spec_mitigate(struct task_struct *task)
2916
{
2917
if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
2918
ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
2919
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
2920
spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
2921
ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
2922
}
2923
#endif
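/*
* The seccomp core calls arch_seccomp_spec_mitigate() when a filter is
* installed, so in the seccomp modes above confined tasks get
* PR_SPEC_FORCE_DISABLE applied for SSB and/or indirect branch
* speculation. Userspace can opt out by passing
* SECCOMP_FILTER_FLAG_SPEC_ALLOW to seccomp(SECCOMP_SET_MODE_FILTER, ...).
*/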
2924
2925
static int l1d_flush_prctl_get(struct task_struct *task)
2926
{
2927
if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
2928
return PR_SPEC_FORCE_DISABLE;
2929
2930
if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH))
2931
return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2932
else
2933
return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2934
}
2935
2936
static int ssb_prctl_get(struct task_struct *task)
2937
{
2938
switch (ssb_mode) {
2939
case SPEC_STORE_BYPASS_NONE:
2940
if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
2941
return PR_SPEC_ENABLE;
2942
return PR_SPEC_NOT_AFFECTED;
2943
case SPEC_STORE_BYPASS_DISABLE:
2944
return PR_SPEC_DISABLE;
2945
case SPEC_STORE_BYPASS_SECCOMP:
2946
case SPEC_STORE_BYPASS_PRCTL:
2947
if (task_spec_ssb_force_disable(task))
2948
return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
2949
if (task_spec_ssb_noexec(task))
2950
return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
2951
if (task_spec_ssb_disable(task))
2952
return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2953
return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2954
}
2955
BUG();
2956
}
2957
2958
static int ib_prctl_get(struct task_struct *task)
2959
{
2960
if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
2961
return PR_SPEC_NOT_AFFECTED;
2962
2963
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2964
spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2965
return PR_SPEC_ENABLE;
2966
else if (is_spec_ib_user_controlled()) {
2967
if (task_spec_ib_force_disable(task))
2968
return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
2969
if (task_spec_ib_disable(task))
2970
return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2971
return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2972
} else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
2973
spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
2974
spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
2975
return PR_SPEC_DISABLE;
2976
else
2977
return PR_SPEC_NOT_AFFECTED;
2978
}
2979
2980
int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
2981
{
2982
switch (which) {
2983
case PR_SPEC_STORE_BYPASS:
2984
return ssb_prctl_get(task);
2985
case PR_SPEC_INDIRECT_BRANCH:
2986
return ib_prctl_get(task);
2987
case PR_SPEC_L1D_FLUSH:
2988
return l1d_flush_prctl_get(task);
2989
default:
2990
return -ENODEV;
2991
}
2992
}
2993
2994
void x86_spec_ctrl_setup_ap(void)
2995
{
2996
if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
2997
update_spec_ctrl(x86_spec_ctrl_base);
2998
2999
if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
3000
x86_amd_ssb_disable();
3001
}
3002
3003
bool itlb_multihit_kvm_mitigation;
3004
EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);
3005
3006
#undef pr_fmt
3007
#define pr_fmt(fmt) "L1TF: " fmt
3008
3009
/* Default mitigation for L1TF-affected CPUs */
3010
enum l1tf_mitigations l1tf_mitigation __ro_after_init =
3011
IS_ENABLED(CONFIG_MITIGATION_L1TF) ? L1TF_MITIGATION_AUTO : L1TF_MITIGATION_OFF;
3012
#if IS_ENABLED(CONFIG_KVM_INTEL)
3013
EXPORT_SYMBOL_GPL(l1tf_mitigation);
3014
#endif
3015
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
3016
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
3017
3018
/*
* These CPUs all support a 44-bit physical address space internally in the
* cache, but CPUID can report a smaller number of physical address bits.
*
* The L1TF mitigation uses the topmost address bit for the inversion of
* non-present PTEs. When the installed memory reaches into the topmost
* address bit due to memory holes, which has been observed on machines
* that report 36 physical address bits and have 32G RAM installed,
* the mitigation range check in l1tf_select_mitigation() triggers.
* This is a false positive because the mitigation is still possible, given
* that the cache uses 44 bits internally. Use the cache bits instead of
* the reported physical bits and adjust them on the affected machines
* to 44 bits if the reported bits are less than 44.
*/
3032
static void override_cache_bits(struct cpuinfo_x86 *c)
3033
{
3034
if (c->x86 != 6)
3035
return;
3036
3037
switch (c->x86_vfm) {
3038
case INTEL_NEHALEM:
3039
case INTEL_WESTMERE:
3040
case INTEL_SANDYBRIDGE:
3041
case INTEL_IVYBRIDGE:
3042
case INTEL_HASWELL:
3043
case INTEL_HASWELL_L:
3044
case INTEL_HASWELL_G:
3045
case INTEL_BROADWELL:
3046
case INTEL_BROADWELL_G:
3047
case INTEL_SKYLAKE_L:
3048
case INTEL_SKYLAKE:
3049
case INTEL_KABYLAKE_L:
3050
case INTEL_KABYLAKE:
3051
if (c->x86_cache_bits < 44)
3052
c->x86_cache_bits = 44;
3053
break;
3054
}
3055
}
3056
3057
static void __init l1tf_select_mitigation(void)
3058
{
3059
if (!boot_cpu_has_bug(X86_BUG_L1TF)) {
3060
l1tf_mitigation = L1TF_MITIGATION_OFF;
3061
return;
3062
}
3063
3064
if (l1tf_mitigation != L1TF_MITIGATION_AUTO)
3065
return;
3066
3067
if (!should_mitigate_vuln(X86_BUG_L1TF)) {
3068
l1tf_mitigation = L1TF_MITIGATION_OFF;
3069
return;
3070
}
3071
3072
if (smt_mitigations == SMT_MITIGATIONS_ON)
3073
l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
3074
else
3075
l1tf_mitigation = L1TF_MITIGATION_FLUSH;
3076
}
3077
3078
static void __init l1tf_apply_mitigation(void)
3079
{
3080
u64 half_pa;
3081
3082
if (!boot_cpu_has_bug(X86_BUG_L1TF))
3083
return;
3084
3085
override_cache_bits(&boot_cpu_data);
3086
3087
switch (l1tf_mitigation) {
3088
case L1TF_MITIGATION_OFF:
3089
case L1TF_MITIGATION_FLUSH_NOWARN:
3090
case L1TF_MITIGATION_FLUSH:
3091
case L1TF_MITIGATION_AUTO:
3092
break;
3093
case L1TF_MITIGATION_FLUSH_NOSMT:
3094
case L1TF_MITIGATION_FULL:
3095
cpu_smt_disable(false);
3096
break;
3097
case L1TF_MITIGATION_FULL_FORCE:
3098
cpu_smt_disable(true);
3099
break;
3100
}
3101
3102
#if CONFIG_PGTABLE_LEVELS == 2
3103
pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
3104
return;
3105
#endif
3106
3107
half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
3108
if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
3109
e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
3110
pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
3111
pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
3112
half_pa);
3113
pr_info("However, doing so will make a part of your RAM unusable.\n");
3114
pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
3115
return;
3116
}
3117
3118
setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
3119
}
3120
3121
static int __init l1tf_cmdline(char *str)
3122
{
3123
if (!boot_cpu_has_bug(X86_BUG_L1TF))
3124
return 0;
3125
3126
if (!str)
3127
return -EINVAL;
3128
3129
if (!strcmp(str, "off"))
3130
l1tf_mitigation = L1TF_MITIGATION_OFF;
3131
else if (!strcmp(str, "flush,nowarn"))
3132
l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
3133
else if (!strcmp(str, "flush"))
3134
l1tf_mitigation = L1TF_MITIGATION_FLUSH;
3135
else if (!strcmp(str, "flush,nosmt"))
3136
l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
3137
else if (!strcmp(str, "full"))
3138
l1tf_mitigation = L1TF_MITIGATION_FULL;
3139
else if (!strcmp(str, "full,force"))
3140
l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;
3141
3142
return 0;
3143
}
3144
early_param("l1tf", l1tf_cmdline);
3145
3146
#undef pr_fmt
3147
#define pr_fmt(fmt) "Speculative Return Stack Overflow: " fmt
3148
3149
static const char * const srso_strings[] = {
3150
[SRSO_MITIGATION_NONE] = "Vulnerable",
3151
[SRSO_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
3152
[SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED] = "Vulnerable: Safe RET, no microcode",
3153
[SRSO_MITIGATION_MICROCODE] = "Vulnerable: Microcode, no safe RET",
3154
[SRSO_MITIGATION_NOSMT] = "Mitigation: SMT disabled",
3155
[SRSO_MITIGATION_SAFE_RET] = "Mitigation: Safe RET",
3156
[SRSO_MITIGATION_IBPB] = "Mitigation: IBPB",
3157
[SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only",
3158
[SRSO_MITIGATION_BP_SPEC_REDUCE] = "Mitigation: Reduced Speculation"
3159
};
3160
3161
static int __init srso_parse_cmdline(char *str)
3162
{
3163
if (!str)
3164
return -EINVAL;
3165
3166
if (!strcmp(str, "off"))
3167
srso_mitigation = SRSO_MITIGATION_NONE;
3168
else if (!strcmp(str, "microcode"))
3169
srso_mitigation = SRSO_MITIGATION_MICROCODE;
3170
else if (!strcmp(str, "safe-ret"))
3171
srso_mitigation = SRSO_MITIGATION_SAFE_RET;
3172
else if (!strcmp(str, "ibpb"))
3173
srso_mitigation = SRSO_MITIGATION_IBPB;
3174
else if (!strcmp(str, "ibpb-vmexit"))
3175
srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
3176
else
3177
pr_err("Ignoring unknown SRSO option (%s).", str);
3178
3179
return 0;
3180
}
3181
early_param("spec_rstack_overflow", srso_parse_cmdline);
3182
3183
#define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options."
3184
3185
static void __init srso_select_mitigation(void)
3186
{
3187
if (!boot_cpu_has_bug(X86_BUG_SRSO)) {
3188
srso_mitigation = SRSO_MITIGATION_NONE;
3189
return;
3190
}
3191
3192
if (srso_mitigation == SRSO_MITIGATION_AUTO) {
3193
/*
3194
* Use safe-RET if user->kernel or guest->host protection is
3195
* required. Otherwise the 'microcode' mitigation is sufficient
3196
* to protect the user->user and guest->guest vectors.
3197
*/
3198
if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
3199
(cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) &&
3200
!boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO))) {
3201
srso_mitigation = SRSO_MITIGATION_SAFE_RET;
3202
} else if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
3203
cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) {
3204
srso_mitigation = SRSO_MITIGATION_MICROCODE;
3205
} else {
3206
srso_mitigation = SRSO_MITIGATION_NONE;
3207
return;
3208
}
3209
}
3210
3211
/* Zen1/2 with SMT off aren't vulnerable to SRSO. */
3212
if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
3213
srso_mitigation = SRSO_MITIGATION_NOSMT;
3214
return;
3215
}
3216
3217
if (!boot_cpu_has(X86_FEATURE_IBPB_BRTYPE)) {
3218
pr_warn("IBPB-extending microcode not applied!\n");
3219
pr_warn(SRSO_NOTICE);
3220
3221
/*
3222
* Safe-RET provides partial mitigation without microcode, but
3223
* other mitigations require microcode to provide any
3224
* mitigations.
3225
*/
3226
if (srso_mitigation == SRSO_MITIGATION_SAFE_RET)
3227
srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED;
3228
else
3229
srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED;
3230
}
3231
3232
switch (srso_mitigation) {
3233
case SRSO_MITIGATION_SAFE_RET:
3234
case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED:
3235
if (boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO)) {
3236
srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
3237
goto ibpb_on_vmexit;
3238
}
3239
3240
if (!IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
3241
pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
3242
srso_mitigation = SRSO_MITIGATION_NONE;
3243
}
3244
break;
3245
ibpb_on_vmexit:
3246
case SRSO_MITIGATION_IBPB_ON_VMEXIT:
3247
if (boot_cpu_has(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) {
3248
pr_notice("Reducing speculation to address VM/HV SRSO attack vector.\n");
3249
srso_mitigation = SRSO_MITIGATION_BP_SPEC_REDUCE;
3250
break;
3251
}
3252
fallthrough;
3253
case SRSO_MITIGATION_IBPB:
3254
if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
3255
pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
3256
srso_mitigation = SRSO_MITIGATION_NONE;
3257
}
3258
break;
3259
default:
3260
break;
3261
}
3262
}
3263
3264
static void __init srso_update_mitigation(void)
3265
{
3266
/* If retbleed is using IBPB, that works for SRSO as well */
3267
if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB &&
3268
boot_cpu_has(X86_FEATURE_IBPB_BRTYPE))
3269
srso_mitigation = SRSO_MITIGATION_IBPB;
3270
3271
if (boot_cpu_has_bug(X86_BUG_SRSO) &&
3272
!cpu_mitigations_off())
3273
pr_info("%s\n", srso_strings[srso_mitigation]);
3274
}
3275
3276
static void __init srso_apply_mitigation(void)
3277
{
3278
/*
3279
* Clear the feature flag if this mitigation is not selected as that
3280
* feature flag controls the BpSpecReduce MSR bit toggling in KVM.
3281
*/
3282
if (srso_mitigation != SRSO_MITIGATION_BP_SPEC_REDUCE)
3283
setup_clear_cpu_cap(X86_FEATURE_SRSO_BP_SPEC_REDUCE);
3284
3285
if (srso_mitigation == SRSO_MITIGATION_NONE) {
3286
if (boot_cpu_has(X86_FEATURE_SBPB))
3287
x86_pred_cmd = PRED_CMD_SBPB;
3288
return;
3289
}
3290
3291
switch (srso_mitigation) {
3292
case SRSO_MITIGATION_SAFE_RET:
3293
case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED:
3294
/*
3295
* Enable the return thunk for generated code
3296
* like ftrace, static_call, etc.
3297
*/
3298
setup_force_cpu_cap(X86_FEATURE_RETHUNK);
3299
setup_force_cpu_cap(X86_FEATURE_UNRET);
3300
3301
if (boot_cpu_data.x86 == 0x19) {
3302
setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
3303
set_return_thunk(srso_alias_return_thunk);
3304
} else {
3305
setup_force_cpu_cap(X86_FEATURE_SRSO);
3306
set_return_thunk(srso_return_thunk);
3307
}
3308
break;
3309
case SRSO_MITIGATION_IBPB:
3310
setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
3311
/*
3312
* IBPB on entry already obviates the need for
3313
* software-based untraining so clear those in case some
3314
* other mitigation like Retbleed has selected them.
3315
*/
3316
setup_clear_cpu_cap(X86_FEATURE_UNRET);
3317
setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
3318
fallthrough;
3319
case SRSO_MITIGATION_IBPB_ON_VMEXIT:
3320
setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
3321
/*
3322
* There is no need for RSB filling: entry_ibpb() ensures
3323
* all predictions, including the RSB, are invalidated,
3324
* regardless of IBPB implementation.
3325
*/
3326
setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
3327
break;
3328
default:
3329
break;
3330
}
3331
}
3332
3333
#undef pr_fmt
3334
#define pr_fmt(fmt) fmt
3335
3336
#ifdef CONFIG_SYSFS
3337
3338
#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
3339
3340
#if IS_ENABLED(CONFIG_KVM_INTEL)
3341
static const char * const l1tf_vmx_states[] = {
3342
[VMENTER_L1D_FLUSH_AUTO] = "auto",
3343
[VMENTER_L1D_FLUSH_NEVER] = "vulnerable",
3344
[VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
3345
[VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes",
3346
[VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled",
3347
[VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary"
3348
};
3349
3350
static ssize_t l1tf_show_state(char *buf)
3351
{
3352
if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
3353
return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
3354
3355
if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
3356
(l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
3357
sched_smt_active())) {
3358
return sysfs_emit(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
3359
l1tf_vmx_states[l1tf_vmx_mitigation]);
3360
}
3361
3362
return sysfs_emit(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
3363
l1tf_vmx_states[l1tf_vmx_mitigation],
3364
sched_smt_active() ? "vulnerable" : "disabled");
3365
}
3366
3367
static ssize_t itlb_multihit_show_state(char *buf)
3368
{
3369
if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
3370
!boot_cpu_has(X86_FEATURE_VMX))
3371
return sysfs_emit(buf, "KVM: Mitigation: VMX unsupported\n");
3372
else if (!(cr4_read_shadow() & X86_CR4_VMXE))
3373
return sysfs_emit(buf, "KVM: Mitigation: VMX disabled\n");
3374
else if (itlb_multihit_kvm_mitigation)
3375
return sysfs_emit(buf, "KVM: Mitigation: Split huge pages\n");
3376
else
3377
return sysfs_emit(buf, "KVM: Vulnerable\n");
3378
}
3379
#else
3380
static ssize_t l1tf_show_state(char *buf)
3381
{
3382
return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
3383
}
3384
3385
static ssize_t itlb_multihit_show_state(char *buf)
3386
{
3387
return sysfs_emit(buf, "Processor vulnerable\n");
3388
}
3389
#endif
3390
3391
static ssize_t mds_show_state(char *buf)
3392
{
3393
if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
3394
return sysfs_emit(buf, "%s; SMT Host state unknown\n",
3395
mds_strings[mds_mitigation]);
3396
}
3397
3398
if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
3399
return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
3400
(mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
3401
sched_smt_active() ? "mitigated" : "disabled"));
3402
}
3403
3404
return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
3405
sched_smt_active() ? "vulnerable" : "disabled");
3406
}
3407
3408
static ssize_t tsx_async_abort_show_state(char *buf)
3409
{
3410
if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
3411
(taa_mitigation == TAA_MITIGATION_OFF))
3412
return sysfs_emit(buf, "%s\n", taa_strings[taa_mitigation]);
3413
3414
if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
3415
return sysfs_emit(buf, "%s; SMT Host state unknown\n",
3416
taa_strings[taa_mitigation]);
3417
}
3418
3419
return sysfs_emit(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
3420
sched_smt_active() ? "vulnerable" : "disabled");
3421
}
3422
3423
static ssize_t mmio_stale_data_show_state(char *buf)
3424
{
3425
if (mmio_mitigation == MMIO_MITIGATION_OFF)
3426
return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
3427
3428
if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
3429
return sysfs_emit(buf, "%s; SMT Host state unknown\n",
3430
mmio_strings[mmio_mitigation]);
3431
}
3432
3433
return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation],
3434
sched_smt_active() ? "vulnerable" : "disabled");
3435
}
3436
3437
static ssize_t rfds_show_state(char *buf)
3438
{
3439
return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]);
3440
}
3441
3442
static ssize_t old_microcode_show_state(char *buf)
3443
{
3444
if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
3445
return sysfs_emit(buf, "Unknown: running under hypervisor");
3446
3447
return sysfs_emit(buf, "Vulnerable\n");
3448
}
3449
3450
static ssize_t its_show_state(char *buf)
3451
{
3452
return sysfs_emit(buf, "%s\n", its_strings[its_mitigation]);
3453
}
3454
3455
static char *stibp_state(void)
3456
{
3457
if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
3458
!boot_cpu_has(X86_FEATURE_AUTOIBRS))
3459
return "";
3460
3461
switch (spectre_v2_user_stibp) {
3462
case SPECTRE_V2_USER_NONE:
3463
return "; STIBP: disabled";
3464
case SPECTRE_V2_USER_STRICT:
3465
return "; STIBP: forced";
3466
case SPECTRE_V2_USER_STRICT_PREFERRED:
3467
return "; STIBP: always-on";
3468
case SPECTRE_V2_USER_PRCTL:
3469
case SPECTRE_V2_USER_SECCOMP:
3470
if (static_key_enabled(&switch_to_cond_stibp))
3471
return "; STIBP: conditional";
3472
}
3473
return "";
3474
}
3475
3476
static char *ibpb_state(void)
3477
{
3478
if (boot_cpu_has(X86_FEATURE_IBPB)) {
3479
if (static_key_enabled(&switch_mm_always_ibpb))
3480
return "; IBPB: always-on";
3481
if (static_key_enabled(&switch_mm_cond_ibpb))
3482
return "; IBPB: conditional";
3483
return "; IBPB: disabled";
3484
}
3485
return "";
3486
}
3487
3488
static char *pbrsb_eibrs_state(void)
3489
{
3490
if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
3491
if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) ||
3492
boot_cpu_has(X86_FEATURE_RSB_VMEXIT))
3493
return "; PBRSB-eIBRS: SW sequence";
3494
else
3495
return "; PBRSB-eIBRS: Vulnerable";
3496
} else {
3497
return "; PBRSB-eIBRS: Not affected";
3498
}
3499
}
3500
3501
static const char *spectre_bhi_state(void)
3502
{
3503
if (!boot_cpu_has_bug(X86_BUG_BHI))
3504
return "; BHI: Not affected";
3505
else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW))
3506
return "; BHI: BHI_DIS_S";
3507
else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
3508
return "; BHI: SW loop, KVM: SW loop";
3509
else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
3510
!boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE) &&
3511
rrsba_disabled)
3512
return "; BHI: Retpoline";
3513
else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_VMEXIT))
3514
return "; BHI: Vulnerable, KVM: SW loop";
3515
3516
return "; BHI: Vulnerable";
3517
}

static ssize_t spectre_v2_show_state(char *buf)
{
	if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
		return sysfs_emit(buf, "Vulnerable: LFENCE\n");

	if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
		return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");

	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
		return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");

	return sysfs_emit(buf, "%s%s%s%s%s%s%s%s\n",
			  spectre_v2_strings[spectre_v2_enabled],
			  ibpb_state(),
			  boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? "; IBRS_FW" : "",
			  stibp_state(),
			  boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? "; RSB filling" : "",
			  pbrsb_eibrs_state(),
			  spectre_bhi_state(),
			  /* this should always be at the end */
			  spectre_v2_module_string());
}

static ssize_t srbds_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]);
}

static ssize_t retbleed_show_state(char *buf)
{
	if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
	    retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
			return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n");

		return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation],
				  !sched_smt_active() ? "disabled" :
				  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
				  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
				  "enabled with STIBP protection" : "vulnerable");
	}

	return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
}

static ssize_t srso_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", srso_strings[srso_mitigation]);
}

static ssize_t gds_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]);
}

static ssize_t tsa_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]);
}
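
/*
 * Common backend for the sysfs show handlers below: CPUs without the bug
 * report "Not affected", otherwise the bug is dispatched to its dedicated
 * *_show_state() helper.  Anything that falls through the switch is
 * reported as plain "Vulnerable".
 */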

static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
			       char *buf, unsigned int bug)
{
	if (!boot_cpu_has_bug(bug))
		return sysfs_emit(buf, "Not affected\n");

	switch (bug) {
	case X86_BUG_CPU_MELTDOWN:
		if (boot_cpu_has(X86_FEATURE_PTI))
			return sysfs_emit(buf, "Mitigation: PTI\n");

		if (hypervisor_is_type(X86_HYPER_XEN_PV))
			return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");

		break;

	case X86_BUG_SPECTRE_V1:
		return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);

	case X86_BUG_SPECTRE_V2:
		return spectre_v2_show_state(buf);

	case X86_BUG_SPEC_STORE_BYPASS:
		return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]);

	case X86_BUG_L1TF:
		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
			return l1tf_show_state(buf);
		break;

	case X86_BUG_MDS:
		return mds_show_state(buf);

	case X86_BUG_TAA:
		return tsx_async_abort_show_state(buf);

	case X86_BUG_ITLB_MULTIHIT:
		return itlb_multihit_show_state(buf);

	case X86_BUG_SRBDS:
		return srbds_show_state(buf);

	case X86_BUG_MMIO_STALE_DATA:
		return mmio_stale_data_show_state(buf);

	case X86_BUG_RETBLEED:
		return retbleed_show_state(buf);

	case X86_BUG_SRSO:
		return srso_show_state(buf);

	case X86_BUG_GDS:
		return gds_show_state(buf);

	case X86_BUG_RFDS:
		return rfds_show_state(buf);

	case X86_BUG_OLD_MICROCODE:
		return old_microcode_show_state(buf);

	case X86_BUG_ITS:
		return its_show_state(buf);

	case X86_BUG_TSA:
		return tsa_show_state(buf);

	default:
		break;
	}

	return sysfs_emit(buf, "Vulnerable\n");
}
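
/*
 * The wrappers below implement the show() callbacks for the files under
 * /sys/devices/system/cpu/vulnerabilities/, overriding the generic
 * "Not affected" defaults provided by drivers/base/cpu.c.
 *
 * Illustrative example of reading them (output varies with CPU, microcode
 * and command line):
 *
 *   $ grep . /sys/devices/system/cpu/vulnerabilities/*
 *   .../meltdown:Mitigation: PTI
 *   .../spectre_v2:Mitigation: Retpolines; IBPB: conditional; STIBP: disabled; RSB filling; ...
 */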
3653
3654
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
3655
{
3656
return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
3657
}
3658
3659
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
3660
{
3661
return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
3662
}
3663
3664
ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
3665
{
3666
return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
3667
}
3668
3669
ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
3670
{
3671
return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
3672
}
3673
3674
ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
3675
{
3676
return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
3677
}
3678
3679
ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
3680
{
3681
return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
3682
}
3683
3684
ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
3685
{
3686
return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
3687
}
3688
3689
ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
3690
{
3691
return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
3692
}
3693
3694
ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
3695
{
3696
return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
3697
}
3698
3699
ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
3700
{
3701
return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
3702
}
3703
3704
ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
3705
{
3706
return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
3707
}
3708
3709
ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf)
3710
{
3711
return cpu_show_common(dev, attr, buf, X86_BUG_SRSO);
3712
}
3713
3714
ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf)
3715
{
3716
return cpu_show_common(dev, attr, buf, X86_BUG_GDS);
3717
}
3718
3719
ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attribute *attr, char *buf)
3720
{
3721
return cpu_show_common(dev, attr, buf, X86_BUG_RFDS);
3722
}
3723
3724
ssize_t cpu_show_old_microcode(struct device *dev, struct device_attribute *attr, char *buf)
3725
{
3726
return cpu_show_common(dev, attr, buf, X86_BUG_OLD_MICROCODE);
3727
}
3728
3729
ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_attribute *attr, char *buf)
3730
{
3731
return cpu_show_common(dev, attr, buf, X86_BUG_ITS);
3732
}
3733
3734
ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf)
3735
{
3736
return cpu_show_common(dev, attr, buf, X86_BUG_TSA);
3737
}
3738
#endif
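
/*
 * __warn_thunk() backs the default, unpatched return thunk: if a return
 * thunk that was never patched to a real mitigation sequence gets executed,
 * it ends up here (via an asm stub) and triggers the WARN_ONCE() below.
 */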

void __warn_thunk(void)
{
	WARN_ONCE(1, "Unpatched return thunk in use. This should not happen!\n");
}