GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/kernel/cpu/microcode/amd.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD CPU Microcode Update Driver for Linux
 *
 * This driver allows upgrading microcode on F10h AMD
 * CPUs and later.
 *
 * Copyright (C) 2008-2011 Advanced Micro Devices Inc.
 *	      2013-2018 Borislav Petkov <[email protected]>
 *
 * Author: Peter Oruba <[email protected]>
 *
 * Based on work by:
 * Tigran Aivazian <[email protected]>
 *
 * early loader:
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <[email protected]>
 * Fixes: Borislav Petkov <[email protected]>
 */
#define pr_fmt(fmt) "microcode: " fmt

#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/bsearch.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/pci.h>

#include <crypto/sha2.h>

#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/cmdline.h>
#include <asm/setup.h>
#include <asm/cpu.h>
#include <asm/msr.h>
#include <asm/tlb.h>

#include "internal.h"

struct ucode_patch {
	struct list_head plist;
	void *data;
	unsigned int size;
	u32 patch_id;
	u16 equiv_cpu;
};

static LIST_HEAD(microcode_cache);

#define UCODE_MAGIC			0x00414d44
#define UCODE_EQUIV_CPU_TABLE_TYPE	0x00000000
#define UCODE_UCODE_TYPE		0x00000001

#define SECTION_HDR_SIZE	8
#define CONTAINER_HDR_SZ	12

struct equiv_cpu_entry {
	u32	installed_cpu;
	u32	fixed_errata_mask;
	u32	fixed_errata_compare;
	u16	equiv_cpu;
	u16	res;
} __packed;

struct microcode_header_amd {
	u32	data_code;
	u32	patch_id;
	u16	mc_patch_data_id;
	u8	mc_patch_data_len;
	u8	init_flag;
	u32	mc_patch_data_checksum;
	u32	nb_dev_id;
	u32	sb_dev_id;
	u16	processor_rev_id;
	u8	nb_rev_id;
	u8	sb_rev_id;
	u8	bios_api_rev;
	u8	reserved1[3];
	u32	match_reg[8];
} __packed;

struct microcode_amd {
	struct microcode_header_amd hdr;
	unsigned int mpb[];
};

static struct equiv_cpu_table {
	unsigned int num_entries;
	struct equiv_cpu_entry *entry;
} equiv_table;

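/*
 * On Zen and newer (family 0x17+), the patch revision encodes the stepping,
 * model, extended model and extended family of the CPUs the patch is meant
 * for, mirroring the CPUID(1).EAX layout below.
 */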
union zen_patch_rev {
	struct {
		__u32 rev	 : 8,
		      stepping	 : 4,
		      model	 : 4,
		      __reserved : 4,
		      ext_model	 : 4,
		      ext_fam	 : 8;
	};
	__u32 ucode_rev;
};

union cpuid_1_eax {
	struct {
		__u32 stepping	  : 4,
		      model	  : 4,
		      family	  : 4,
		      __reserved0 : 4,
		      ext_model	  : 4,
		      ext_fam	  : 8,
		      __reserved1 : 4;
	};
	__u32 full;
};

/*
 * This points to the current valid container of microcode patches which we
 * will save from the initrd/builtin before jettisoning its contents. @mc is
 * the microcode patch we found to match.
 */
struct cont_desc {
	struct microcode_amd *mc;
	u32 psize;
	u8 *data;
	size_t size;
};

/*
 * Microcode patch container file is prepended to the initrd in cpio
 * format. See Documentation/arch/x86/microcode.rst
 */
static const char
ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";

/*
 * This is CPUID(1).EAX on the BSP. It is used in two ways:
 *
 * 1. To ignore the equivalence table on Zen1 and newer.
 *
 * 2. To match which patches to load because the patch revision ID
 *    already contains the f/m/s for which the microcode is destined.
 */
static u32 bsp_cpuid_1_eax __ro_after_init;

static bool sha_check = true;

struct patch_digest {
	u32 patch_id;
	u8 sha256[SHA256_DIGEST_SIZE];
};

#include "amd_shas.c"

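/* bsearch() comparator: look up a patch ID in the sorted phashes[] table. */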
static int cmp_id(const void *key, const void *elem)
{
	struct patch_digest *pd = (struct patch_digest *)elem;
	u32 patch_id = *(u32 *)key;

	if (patch_id == pd->patch_id)
		return 0;
	else if (patch_id < pd->patch_id)
		return -1;
	else
		return 1;
}

static u32 cpuid_to_ucode_rev(unsigned int val)
{
	union zen_patch_rev p = {};
	union cpuid_1_eax c;

	c.full = val;

	p.stepping = c.stepping;
	p.model = c.model;
	p.ext_model = c.ext_model;
	p.ext_fam = c.ext_fam;

	return p.ucode_rev;
}

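/*
 * Map a patch revision prefix (f/m/s) to its cutoff revision. Patches applied
 * on top of a current revision at or below the cutoff must have their SHA256
 * digest verified first; 0 means no cutoff is known for this revision.
 */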
static u32 get_cutoff_revision(u32 rev)
{
	switch (rev >> 8) {
	case 0x80012: return 0x8001277;
	case 0x80082: return 0x800820f;
	case 0x83010: return 0x830107c;
	case 0x86001: return 0x860010e;
	case 0x86081: return 0x8608108;
	case 0x87010: return 0x8701034;
	case 0x8a000: return 0x8a0000a;
	case 0xa0010: return 0xa00107a;
	case 0xa0011: return 0xa0011da;
	case 0xa0012: return 0xa001243;
	case 0xa0082: return 0xa00820e;
	case 0xa1011: return 0xa101153;
	case 0xa1012: return 0xa10124e;
	case 0xa1081: return 0xa108109;
	case 0xa2010: return 0xa20102f;
	case 0xa2012: return 0xa201212;
	case 0xa4041: return 0xa404109;
	case 0xa5000: return 0xa500013;
	case 0xa6012: return 0xa60120a;
	case 0xa7041: return 0xa704109;
	case 0xa7052: return 0xa705208;
	case 0xa7080: return 0xa708009;
	case 0xa70c0: return 0xa70c009;
	case 0xaa001: return 0xaa00116;
	case 0xaa002: return 0xaa00218;
	case 0xb0021: return 0xb002146;
	case 0xb0081: return 0xb008111;
	case 0xb1010: return 0xb101046;
	case 0xb2040: return 0xb204031;
	case 0xb4040: return 0xb404031;
	case 0xb4041: return 0xb404101;
	case 0xb6000: return 0xb600031;
	case 0xb6080: return 0xb608031;
	case 0xb7000: return 0xb700031;
	default: break;
	}
	return 0;
}

static bool need_sha_check(u32 cur_rev)
{
	u32 cutoff;

	if (!cur_rev) {
		cur_rev = cpuid_to_ucode_rev(bsp_cpuid_1_eax);
		pr_info_once("No current revision, generating the lowest one: 0x%x\n", cur_rev);
	}

	cutoff = get_cutoff_revision(cur_rev);
	if (cutoff)
		return cur_rev <= cutoff;

	pr_info("You should not be seeing this. Please send the following couple of lines to x86-<at>-kernel.org\n");
	pr_info("CPUID(1).EAX: 0x%x, current revision: 0x%x\n", bsp_cpuid_1_eax, cur_rev);
	return true;
}

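/*
 * CPUs for which patch SHA256 digests are tracked in amd_shas.c: families
 * 0x17 and 0x19, and a subset of family 0x1a models.
 */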
static bool cpu_has_entrysign(void)
{
	unsigned int fam = x86_family(bsp_cpuid_1_eax);
	unsigned int model = x86_model(bsp_cpuid_1_eax);

	if (fam == 0x17 || fam == 0x19)
		return true;

	if (fam == 0x1a) {
		if (model <= 0x2f ||
		    (0x40 <= model && model <= 0x4f) ||
		    (0x60 <= model && model <= 0x7f))
			return true;
	}

	return false;
}

static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsigned int len)
{
	struct patch_digest *pd = NULL;
	u8 digest[SHA256_DIGEST_SIZE];
	int i;

	if (!cpu_has_entrysign())
		return true;

	if (!need_sha_check(cur_rev))
		return true;

	if (!sha_check)
		return true;

	pd = bsearch(&patch_id, phashes, ARRAY_SIZE(phashes), sizeof(struct patch_digest), cmp_id);
	if (!pd) {
		pr_err("No sha256 digest for patch ID: 0x%x found\n", patch_id);
		return false;
	}

	sha256(data, len, digest);

	if (memcmp(digest, pd->sha256, sizeof(digest))) {
		pr_err("Patch 0x%x SHA256 digest mismatch!\n", patch_id);

		for (i = 0; i < SHA256_DIGEST_SIZE; i++)
			pr_cont("0x%x ", digest[i]);
		pr_info("\n");

		return false;
	}

	return true;
}

static union cpuid_1_eax ucode_rev_to_cpuid(unsigned int val)
{
	union zen_patch_rev p;
	union cpuid_1_eax c;

	p.ucode_rev = val;
	c.full = 0;

	c.stepping = p.stepping;
	c.model = p.model;
	c.ext_model = p.ext_model;
	c.family = 0xf;
	c.ext_fam = p.ext_fam;

	return c;
}

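/*
 * Return the current microcode revision from MSR_AMD64_PATCH_LEVEL. With
 * CONFIG_MICROCODE_DBG, a software-tracked per-CPU revision is returned
 * instead, seeded from the BSP's CPUID(1).EAX.
 */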
static u32 get_patch_level(void)
{
	u32 rev, dummy __always_unused;

	if (IS_ENABLED(CONFIG_MICROCODE_DBG)) {
		int cpu = smp_processor_id();

		if (!microcode_rev[cpu]) {
			if (!base_rev)
				base_rev = cpuid_to_ucode_rev(bsp_cpuid_1_eax);

			microcode_rev[cpu] = base_rev;

			ucode_dbg("CPU%d, base_rev: 0x%x\n", cpu, base_rev);
		}

		return microcode_rev[cpu];
	}

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

	return rev;
}

static u16 find_equiv_id(struct equiv_cpu_table *et, u32 sig)
{
	unsigned int i;

	/* Zen and newer do not need an equivalence table. */
	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
		return 0;

	if (!et || !et->num_entries)
		return 0;

	for (i = 0; i < et->num_entries; i++) {
		struct equiv_cpu_entry *e = &et->entry[i];

		if (sig == e->installed_cpu)
			return e->equiv_cpu;
	}
	return 0;
}

/*
 * Check whether there is a valid microcode container file at the beginning
 * of @buf of size @buf_size.
 */
static bool verify_container(const u8 *buf, size_t buf_size)
{
	u32 cont_magic;

	if (buf_size <= CONTAINER_HDR_SZ) {
		ucode_dbg("Truncated microcode container header.\n");
		return false;
	}

	cont_magic = *(const u32 *)buf;
	if (cont_magic != UCODE_MAGIC) {
		ucode_dbg("Invalid magic value (0x%08x).\n", cont_magic);
		return false;
	}

	return true;
}

/*
 * Check whether there is a valid, non-truncated CPU equivalence table at the
 * beginning of @buf of size @buf_size.
 */
static bool verify_equivalence_table(const u8 *buf, size_t buf_size)
{
	const u32 *hdr = (const u32 *)buf;
	u32 cont_type, equiv_tbl_len;

	if (!verify_container(buf, buf_size))
		return false;

	/* Zen and newer do not need an equivalence table. */
	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
		return true;

	cont_type = hdr[1];
	if (cont_type != UCODE_EQUIV_CPU_TABLE_TYPE) {
		ucode_dbg("Wrong microcode container equivalence table type: %u.\n",
			  cont_type);
		return false;
	}

	buf_size -= CONTAINER_HDR_SZ;

	equiv_tbl_len = hdr[2];
	if (equiv_tbl_len < sizeof(struct equiv_cpu_entry) ||
	    buf_size < equiv_tbl_len) {
		ucode_dbg("Truncated equivalence table.\n");
		return false;
	}

	return true;
}

/*
 * Check whether there is a valid, non-truncated microcode patch section at the
 * beginning of @buf of size @buf_size.
 *
 * On success, @sh_psize returns the patch size according to the section
 * header to the caller.
 */
static bool __verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize)
{
	u32 p_type, p_size;
	const u32 *hdr;

	if (buf_size < SECTION_HDR_SIZE) {
		ucode_dbg("Truncated patch section.\n");
		return false;
	}

	hdr = (const u32 *)buf;
	p_type = hdr[0];
	p_size = hdr[1];

	if (p_type != UCODE_UCODE_TYPE) {
		ucode_dbg("Invalid type field (0x%x) in container file section header.\n",
			  p_type);
		return false;
	}

	if (p_size < sizeof(struct microcode_header_amd)) {
		ucode_dbg("Patch of size %u too short.\n", p_size);
		return false;
	}

	*sh_psize = p_size;

	return true;
}

/*
 * Check whether the remaining file size @buf_size is large enough to contain
 * a patch of the indicated @sh_psize (and whether this size does not exceed
 * the per-family maximum). @sh_psize is the size read from the section
 * header.
 */
static bool __verify_patch_size(u32 sh_psize, size_t buf_size)
{
	u8 family = x86_family(bsp_cpuid_1_eax);
	u32 max_size;

	if (family >= 0x15)
		goto ret;

#define F1XH_MPB_MAX_SIZE 2048
#define F14H_MPB_MAX_SIZE 1824

	switch (family) {
	case 0x10 ... 0x12:
		max_size = F1XH_MPB_MAX_SIZE;
		break;
	case 0x14:
		max_size = F14H_MPB_MAX_SIZE;
		break;
	default:
		WARN(1, "%s: WTF family: 0x%x\n", __func__, family);
		return false;
	}

	if (sh_psize > max_size)
		return false;

ret:
	/* The patch may consume the whole remaining buffer, so <= is ok. */
	return sh_psize <= buf_size;
}

/*
 * Verify the patch in @buf.
 *
 * Returns:
 * negative: on error
 * positive: patch is not for this family, skip it
 * 0: success
 */
static int verify_patch(const u8 *buf, size_t buf_size, u32 *patch_size)
{
	u8 family = x86_family(bsp_cpuid_1_eax);
	struct microcode_header_amd *mc_hdr;
	u32 cur_rev, cutoff, patch_rev;
	u32 sh_psize;
	u16 proc_id;
	u8 patch_fam;

	if (!__verify_patch_section(buf, buf_size, &sh_psize))
		return -1;

	/*
	 * The section header length is not included in this indicated size
	 * but is present in the leftover file length so we need to subtract
	 * it before passing this value to the function below.
	 */
	buf_size -= SECTION_HDR_SIZE;

	/*
	 * Check if the remaining buffer is big enough to contain a patch of
	 * size sh_psize, as the section claims.
	 */
	if (buf_size < sh_psize) {
		ucode_dbg("Patch of size %u truncated.\n", sh_psize);
		return -1;
	}

	if (!__verify_patch_size(sh_psize, buf_size)) {
		ucode_dbg("Per-family patch size mismatch.\n");
		return -1;
	}

	*patch_size = sh_psize;

	mc_hdr = (struct microcode_header_amd *)(buf + SECTION_HDR_SIZE);
	if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
		pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr->patch_id);
		return -1;
	}

	proc_id = mc_hdr->processor_rev_id;
	patch_fam = 0xf + (proc_id >> 12);

	if (patch_fam != family)
		return 1;

	cur_rev = get_patch_level();

	/* No cutoff revision means old/unaffected by signing algorithm weakness => matches. */
	cutoff = get_cutoff_revision(cur_rev);
	if (!cutoff)
		goto ok;

	patch_rev = mc_hdr->patch_id;

	ucode_dbg("cur_rev: 0x%x, cutoff: 0x%x, patch_rev: 0x%x\n",
		  cur_rev, cutoff, patch_rev);

	if (cur_rev <= cutoff && patch_rev <= cutoff)
		goto ok;

	if (cur_rev > cutoff && patch_rev > cutoff)
		goto ok;

	return 1;

ok:
	ucode_dbg("Patch-ID 0x%08x: family: 0x%x\n", mc_hdr->patch_id, patch_fam);

	return 0;
}

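/*
 * Check whether patch @mc fits this system's CPU: Zen and newer match on the
 * f/m/s encoded in the patch ID, older families match on the equivalence ID.
 */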
static bool mc_patch_matches(struct microcode_amd *mc, u16 eq_id)
{
	/* Zen and newer do not need an equivalence table. */
	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
		return ucode_rev_to_cpuid(mc->hdr.patch_id).full == bsp_cpuid_1_eax;
	else
		return eq_id == mc->hdr.processor_rev_id;
}

/*
 * This scans the ucode blob for the proper container as we can have multiple
 * containers glued together.
 *
 * Returns the number of bytes consumed while scanning. @desc contains all the
 * data we're going to use in later stages of the application.
 */
static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc)
{
	struct equiv_cpu_table table;
	size_t orig_size = size;
	u32 *hdr = (u32 *)ucode;
	u16 eq_id;
	u8 *buf;

	if (!verify_equivalence_table(ucode, size))
		return 0;

	buf = ucode;

	table.entry = (struct equiv_cpu_entry *)(buf + CONTAINER_HDR_SZ);
	table.num_entries = hdr[2] / sizeof(struct equiv_cpu_entry);

	/*
	 * Find the equivalence ID of our CPU in this table. Even if this table
	 * doesn't contain a patch for the CPU, scan through the whole container
	 * so that it can be skipped in case there are other containers appended.
	 */
	eq_id = find_equiv_id(&table, bsp_cpuid_1_eax);

	buf += hdr[2] + CONTAINER_HDR_SZ;
	size -= hdr[2] + CONTAINER_HDR_SZ;

	/*
	 * Scan through the rest of the container to find where it ends. We do
	 * some basic sanity-checking too.
	 */
	while (size > 0) {
		struct microcode_amd *mc;
		u32 patch_size;
		int ret;

		ret = verify_patch(buf, size, &patch_size);
		if (ret < 0) {
			/*
			 * Patch verification failed, skip to the next container,
			 * if there is one. Before exiting, check whether a
			 * matching patch has already been found in this
			 * container. If so, use it.
			 */
			goto out;
		} else if (ret > 0) {
			goto skip;
		}

		mc = (struct microcode_amd *)(buf + SECTION_HDR_SIZE);

		if (mc_patch_matches(mc, eq_id)) {
			desc->psize = patch_size;
			desc->mc = mc;

			ucode_dbg(" match: size: %d\n", patch_size);
		}

skip:
		/* Skip patch section header too: */
		buf += patch_size + SECTION_HDR_SIZE;
		size -= patch_size + SECTION_HDR_SIZE;
	}

out:
	/*
	 * If we have found a patch (desc->mc), it means we're looking at the
	 * container which has a patch for this CPU so return 0 to mean that
	 * @ucode already points to the proper container. Otherwise, we return
	 * the size we scanned so that we can advance to the next container in
	 * the buffer.
	 */
	if (desc->mc) {
		desc->data = ucode;
		desc->size = orig_size - size;

		return 0;
	}

	return orig_size - size;
}

/*
 * Scan the ucode blob for the proper container as we can have multiple
 * containers glued together.
 */
static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
{
	while (size) {
		size_t s = parse_container(ucode, size, desc);
		if (!s)
			return;

		/* catch wraparound */
		if (size >= s) {
			ucode += s;
			size -= s;
		} else {
			return;
		}
	}
}

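/*
 * Apply patch @mc: verify its SHA256 digest where required, hand its address
 * to the CPU via MSR_AMD64_PATCH_LOADER, and confirm that the patch level
 * reported afterwards matches the patch ID. @cur_rev returns the new level.
 */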
static bool __apply_microcode_amd(struct microcode_amd *mc, u32 *cur_rev,
				  unsigned int psize)
{
	unsigned long p_addr = (unsigned long)&mc->hdr.data_code;

	if (!verify_sha256_digest(mc->hdr.patch_id, *cur_rev, (const u8 *)p_addr, psize))
		return false;

	native_wrmsrq(MSR_AMD64_PATCH_LOADER, p_addr);

	if (x86_family(bsp_cpuid_1_eax) == 0x17) {
		unsigned long p_addr_end = p_addr + psize - 1;

		invlpg(p_addr);

		/*
		 * Flush next page too if patch image is crossing a page
		 * boundary.
		 */
		if (p_addr >> PAGE_SHIFT != p_addr_end >> PAGE_SHIFT)
			invlpg(p_addr_end);
	}

	if (IS_ENABLED(CONFIG_MICROCODE_DBG))
		microcode_rev[smp_processor_id()] = mc->hdr.patch_id;

	/* verify patch application was successful */
	*cur_rev = get_patch_level();

	ucode_dbg("updated rev: 0x%x\n", *cur_rev);

	if (*cur_rev != mc->hdr.patch_id)
		return false;

	return true;
}

static bool get_builtin_microcode(struct cpio_data *cp)
{
	char fw_name[36] = "amd-ucode/microcode_amd.bin";
	u8 family = x86_family(bsp_cpuid_1_eax);
	struct firmware fw;

	if (IS_ENABLED(CONFIG_X86_32))
		return false;

	if (family >= 0x15)
		snprintf(fw_name, sizeof(fw_name),
			 "amd-ucode/microcode_amd_fam%02hhxh.bin", family);

	if (firmware_request_builtin(&fw, fw_name)) {
		cp->size = fw.size;
		cp->data = (void *)fw.data;
		return true;
	}

	return false;
}

static bool __init find_blobs_in_containers(struct cpio_data *ret)
{
	struct cpio_data cp;
	bool found;

	if (!get_builtin_microcode(&cp))
		cp = find_microcode_in_initrd(ucode_path);

	found = cp.data && cp.size;
	if (found)
		*ret = cp;

	return found;
}

/*
 * Early load occurs before we can vmalloc(). So we look for the microcode
 * patch container file in initrd, traverse equivalent cpu table, look for a
 * matching microcode patch, and update, all in initrd memory in place.
 * When vmalloc() is available for use later -- on 64-bit during first AP load,
 * and on 32-bit during save_microcode_in_initrd() -- we can call
 * load_microcode_amd() to save equivalent cpu table and microcode patches in
 * kernel heap memory.
 */
void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_eax)
{
	struct cont_desc desc = { };
	struct microcode_amd *mc;
	struct cpio_data cp = { };
	char buf[4];
	u32 rev;

	if (cmdline_find_option(boot_command_line, "microcode.amd_sha_check", buf, 4)) {
		if (!strncmp(buf, "off", 3)) {
			sha_check = false;
			pr_warn_once("It is a very very bad idea to disable the blobs SHA check!\n");
			add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
		}
	}

	bsp_cpuid_1_eax = cpuid_1_eax;

	rev = get_patch_level();
	ed->old_rev = rev;

	/* Needed in load_microcode_amd() */
	ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax;

	if (!find_blobs_in_containers(&cp))
		return;

	scan_containers(cp.data, cp.size, &desc);

	mc = desc.mc;
	if (!mc)
		return;

	/*
	 * Allow application of the same revision to pick up SMT-specific
	 * changes even if the revision of the other SMT thread is already
	 * up-to-date.
	 */
	if (ed->old_rev > mc->hdr.patch_id)
		return;

	if (__apply_microcode_amd(mc, &rev, desc.psize))
		ed->new_rev = rev;
}

static inline bool patch_cpus_equivalent(struct ucode_patch *p,
					 struct ucode_patch *n,
					 bool ignore_stepping)
{
	/* Zen and newer hardcode the f/m/s in the patch ID */
	if (x86_family(bsp_cpuid_1_eax) >= 0x17) {
		union cpuid_1_eax p_cid = ucode_rev_to_cpuid(p->patch_id);
		union cpuid_1_eax n_cid = ucode_rev_to_cpuid(n->patch_id);

		if (ignore_stepping) {
			p_cid.stepping = 0;
			n_cid.stepping = 0;
		}

		return p_cid.full == n_cid.full;
	} else {
		return p->equiv_cpu == n->equiv_cpu;
	}
}

/*
 * A small, trivial cache of per-family ucode patches.
 */
static struct ucode_patch *cache_find_patch(struct ucode_cpu_info *uci, u16 equiv_cpu)
{
	struct ucode_patch *p;
	struct ucode_patch n;

	n.equiv_cpu = equiv_cpu;
	n.patch_id = uci->cpu_sig.rev;

	list_for_each_entry(p, &microcode_cache, plist)
		if (patch_cpus_equivalent(p, &n, false))
			return p;

	return NULL;
}

static inline int patch_newer(struct ucode_patch *p, struct ucode_patch *n)
{
	/* Zen and newer hardcode the f/m/s in the patch ID */
	if (x86_family(bsp_cpuid_1_eax) >= 0x17) {
		union zen_patch_rev zp, zn;

		zp.ucode_rev = p->patch_id;
		zn.ucode_rev = n->patch_id;

		if (zn.stepping != zp.stepping)
			return -1;

		return zn.rev > zp.rev;
	} else {
		return n->patch_id > p->patch_id;
	}
}

static void update_cache(struct ucode_patch *new_patch)
{
	struct ucode_patch *p;
	int ret;

	list_for_each_entry(p, &microcode_cache, plist) {
		if (patch_cpus_equivalent(p, new_patch, true)) {
			ret = patch_newer(p, new_patch);
			if (ret < 0)
				continue;
			else if (!ret) {
				/* we already have the latest patch */
				kfree(new_patch->data);
				kfree(new_patch);
				return;
			}

			list_replace(&p->plist, &new_patch->plist);
			kfree(p->data);
			kfree(p);
			return;
		}
	}
	/* no patch found, add it */
	list_add_tail(&new_patch->plist, &microcode_cache);
}

static void free_cache(void)
{
	struct ucode_patch *p, *tmp;

	list_for_each_entry_safe(p, tmp, &microcode_cache, plist) {
		__list_del(p->plist.prev, p->plist.next);
		kfree(p->data);
		kfree(p);
	}
}

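/*
 * Find a cached patch for @cpu. Pre-Zen families go through the equivalence
 * table first; Zen and newer match purely on the patch ID.
 */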
static struct ucode_patch *find_patch(unsigned int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	u16 equiv_id = 0;

	uci->cpu_sig.rev = get_patch_level();

	if (x86_family(bsp_cpuid_1_eax) < 0x17) {
		equiv_id = find_equiv_id(&equiv_table, uci->cpu_sig.sig);
		if (!equiv_id)
			return NULL;
	}

	return cache_find_patch(uci, equiv_id);
}

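/* Re-apply the cached patch, e.g. on resume, if it is newer than the current revision. */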
void reload_ucode_amd(unsigned int cpu)
{
	u32 rev, dummy __always_unused;
	struct microcode_amd *mc;
	struct ucode_patch *p;

	p = find_patch(cpu);
	if (!p)
		return;

	mc = p->data;

	rev = get_patch_level();
	if (rev < mc->hdr.patch_id) {
		if (__apply_microcode_amd(mc, &rev, p->size))
			pr_info_once("reload revision: 0x%08x\n", rev);
	}
}

static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct ucode_patch *p;

	csig->sig = cpuid_eax(0x00000001);
	csig->rev = get_patch_level();

	/*
	 * a patch could have been loaded early, set uci->mc so that
	 * mc_bp_resume() can call apply_microcode()
	 */
	p = find_patch(cpu);
	if (p && (p->patch_id == csig->rev))
		uci->mc = p->data;

	return 0;
}

static enum ucode_state apply_microcode_amd(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct microcode_amd *mc_amd;
	struct ucode_cpu_info *uci;
	struct ucode_patch *p;
	enum ucode_state ret;
	u32 rev;

	BUG_ON(raw_smp_processor_id() != cpu);

	uci = ucode_cpu_info + cpu;

	p = find_patch(cpu);
	if (!p)
		return UCODE_NFOUND;

	rev = uci->cpu_sig.rev;

	mc_amd = p->data;
	uci->mc = p->data;

	/* need to apply patch? */
	if (rev > mc_amd->hdr.patch_id) {
		ret = UCODE_OK;
		goto out;
	}

	if (!__apply_microcode_amd(mc_amd, &rev, p->size)) {
		pr_err("CPU%d: update failed for patch_level=0x%08x\n",
		       cpu, mc_amd->hdr.patch_id);
		return UCODE_ERROR;
	}

	rev = mc_amd->hdr.patch_id;
	ret = UCODE_UPDATED;

out:
	uci->cpu_sig.rev = rev;
	c->microcode = rev;

	/* Update boot_cpu_data's revision too, if we're on the BSP: */
	if (c->cpu_index == boot_cpu_data.cpu_index)
		boot_cpu_data.microcode = rev;

	return ret;
}

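/* Early AP loading: record the AP's signature and apply the best cached patch. */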
void load_ucode_amd_ap(unsigned int cpuid_1_eax)
{
	unsigned int cpu = smp_processor_id();

	ucode_cpu_info[cpu].cpu_sig.sig = cpuid_1_eax;
	apply_microcode_amd(cpu);
}

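/*
 * Copy the equivalence table at @buf into kernel memory (pre-Zen only) and
 * return the offset of the first patch section: table length plus container
 * header.
 */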
static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size)
{
	u32 equiv_tbl_len;
	const u32 *hdr;

	if (!verify_equivalence_table(buf, buf_size))
		return 0;

	hdr = (const u32 *)buf;
	equiv_tbl_len = hdr[2];

	/* Zen and newer do not need an equivalence table. */
	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
		goto out;

	equiv_table.entry = vmalloc(equiv_tbl_len);
	if (!equiv_table.entry) {
		pr_err("failed to allocate equivalent CPU table\n");
		return 0;
	}

	memcpy(equiv_table.entry, buf + CONTAINER_HDR_SZ, equiv_tbl_len);
	equiv_table.num_entries = equiv_tbl_len / sizeof(struct equiv_cpu_entry);

out:
	/* add header length */
	return equiv_tbl_len + CONTAINER_HDR_SZ;
}

static void free_equiv_cpu_table(void)
{
	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
		return;

	vfree(equiv_table.entry);
	memset(&equiv_table, 0, sizeof(equiv_table));
}

static void cleanup(void)
{
	free_equiv_cpu_table();
	free_cache();
}

/*
 * Return a non-negative value even if some of the checks failed so that
 * we can skip over the next patch. If we return a negative value, we
 * signal a grave error like a memory allocation has failed and the
 * driver cannot continue functioning normally. In such cases, we tear
 * down everything we've used up so far and exit.
 */
static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
				unsigned int *patch_size)
{
	struct microcode_header_amd *mc_hdr;
	struct ucode_patch *patch;
	u16 proc_id;
	int ret;

	ret = verify_patch(fw, leftover, patch_size);
	if (ret)
		return ret;

	patch = kzalloc(sizeof(*patch), GFP_KERNEL);
	if (!patch) {
		pr_err("Patch allocation failure.\n");
		return -EINVAL;
	}

	patch->data = kmemdup(fw + SECTION_HDR_SIZE, *patch_size, GFP_KERNEL);
	if (!patch->data) {
		pr_err("Patch data allocation failure.\n");
		kfree(patch);
		return -EINVAL;
	}
	patch->size = *patch_size;

	mc_hdr = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
	proc_id = mc_hdr->processor_rev_id;

	INIT_LIST_HEAD(&patch->plist);
	patch->patch_id = mc_hdr->patch_id;
	patch->equiv_cpu = proc_id;

	ucode_dbg("%s: Adding patch_id: 0x%08x, proc_id: 0x%04x\n",
		  __func__, patch->patch_id, proc_id);

	/* ... and add to cache. */
	update_cache(patch);

	return 0;
}

/* Scan the blob in @data and add microcode patches to the cache. */
static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, size_t size)
{
	u8 *fw = (u8 *)data;
	size_t offset;

	offset = install_equiv_cpu_table(data, size);
	if (!offset)
		return UCODE_ERROR;

	fw += offset;
	size -= offset;

	if (*(u32 *)fw != UCODE_UCODE_TYPE) {
		pr_err("invalid type field in container file section header\n");
		free_equiv_cpu_table();
		return UCODE_ERROR;
	}

	while (size > 0) {
		unsigned int crnt_size = 0;
		int ret;

		ret = verify_and_add_patch(family, fw, size, &crnt_size);
		if (ret < 0)
			return UCODE_ERROR;

		fw += crnt_size + SECTION_HDR_SIZE;
		size -= (crnt_size + SECTION_HDR_SIZE);
	}

	return UCODE_OK;
}

static enum ucode_state _load_microcode_amd(u8 family, const u8 *data, size_t size)
{
	enum ucode_state ret;

	/* free old equiv table */
	free_equiv_cpu_table();

	ret = __load_microcode_amd(family, data, size);
	if (ret != UCODE_OK)
		cleanup();

	return ret;
}

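/*
 * Load the patches in @data into the cache and report UCODE_NEW if any node's
 * first CPU would receive a newer revision than the one it is running.
 */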
static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
{
	struct cpuinfo_x86 *c;
	unsigned int nid, cpu;
	struct ucode_patch *p;
	enum ucode_state ret;

	ret = _load_microcode_amd(family, data, size);
	if (ret != UCODE_OK)
		return ret;

	for_each_node_with_cpus(nid) {
		cpu = cpumask_first(cpumask_of_node(nid));
		c = &cpu_data(cpu);

		p = find_patch(cpu);
		if (!p)
			continue;

		if (c->microcode >= p->patch_id)
			continue;

		ret = UCODE_NEW;
	}

	return ret;
}

static int __init save_microcode_in_initrd(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	struct cont_desc desc = { 0 };
	unsigned int cpuid_1_eax;
	enum ucode_state ret;
	struct cpio_data cp;

	if (microcode_loader_disabled() || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
		return 0;

	cpuid_1_eax = native_cpuid_eax(1);

	if (!find_blobs_in_containers(&cp))
		return -EINVAL;

	scan_containers(cp.data, cp.size, &desc);
	if (!desc.mc)
		return -EINVAL;

	ret = _load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
	if (ret > UCODE_UPDATED)
		return -EINVAL;

	return 0;
}
early_initcall(save_microcode_in_initrd);

/*
 * AMD microcode firmware naming convention: up to family 15h, it lives in
 * the legacy file:
 *
 *    amd-ucode/microcode_amd.bin
 *
 * This legacy file is always smaller than 2K in size.
 *
 * Beginning with family 15h, it lives in family-specific firmware files:
 *
 *    amd-ucode/microcode_amd_fam15h.bin
 *    amd-ucode/microcode_amd_fam16h.bin
 *    ...
 *
 * These might be larger than 2K.
 */
static enum ucode_state request_microcode_amd(int cpu, struct device *device)
{
	char fw_name[36] = "amd-ucode/microcode_amd.bin";
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	enum ucode_state ret = UCODE_NFOUND;
	const struct firmware *fw;

	if (force_minrev)
		return UCODE_NFOUND;

	if (c->x86 >= 0x15)
		snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);

	if (request_firmware_direct(&fw, (const char *)fw_name, device)) {
		ucode_dbg("failed to load file %s\n", fw_name);
		goto out;
	}

	ret = UCODE_ERROR;
	if (!verify_container(fw->data, fw->size))
		goto fw_release;

	ret = load_microcode_amd(c->x86, fw->data, fw->size);

fw_release:
	release_firmware(fw);

out:
	return ret;
}

static void microcode_fini_cpu_amd(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	uci->mc = NULL;
}

static void finalize_late_load_amd(int result)
{
	if (result)
		cleanup();
}

static struct microcode_ops microcode_amd_ops = {
	.request_microcode_fw	= request_microcode_amd,
	.collect_cpu_info	= collect_cpu_info_amd,
	.apply_microcode	= apply_microcode_amd,
	.microcode_fini_cpu	= microcode_fini_cpu_amd,
	.finalize_late_load	= finalize_late_load_amd,
	.nmi_safe		= true,
};

struct microcode_ops * __init init_amd_microcode(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
		pr_warn("AMD CPU family 0x%x not supported\n", c->x86);
		return NULL;
	}
	return &microcode_amd_ops;
}

void __exit exit_amd_microcode(void)
{
	cleanup();
}