GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/kernel/cpu/microcode/amd.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD CPU Microcode Update Driver for Linux
 *
 * This driver allows upgrading microcode on F10h AMD
 * CPUs and later.
 *
 * Copyright (C) 2008-2011 Advanced Micro Devices Inc.
 *	          2013-2018 Borislav Petkov <[email protected]>
 *
 * Author: Peter Oruba <[email protected]>
 *
 * Based on work by:
 * Tigran Aivazian <[email protected]>
 *
 * early loader:
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <[email protected]>
 * Fixes: Borislav Petkov <[email protected]>
 */
#define pr_fmt(fmt) "microcode: " fmt

#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/bsearch.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/pci.h>

#include <crypto/sha2.h>

#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/cmdline.h>
#include <asm/setup.h>
#include <asm/cpu.h>
#include <asm/msr.h>
#include <asm/tlb.h>

#include "internal.h"

struct ucode_patch {
	struct list_head plist;
	void *data;
	unsigned int size;
	u32 patch_id;
	u16 equiv_cpu;
};

static LIST_HEAD(microcode_cache);

#define UCODE_MAGIC			0x00414d44
#define UCODE_EQUIV_CPU_TABLE_TYPE	0x00000000
#define UCODE_UCODE_TYPE		0x00000001

#define SECTION_HDR_SIZE		8
#define CONTAINER_HDR_SZ		12

struct equiv_cpu_entry {
	u32	installed_cpu;
	u32	fixed_errata_mask;
	u32	fixed_errata_compare;
	u16	equiv_cpu;
	u16	res;
} __packed;

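/*
 * For orientation, the container layout implied by the constants above:
 * a 12-byte container header (u32 UCODE_MAGIC, u32 section type, u32
 * equivalence table length), the equivalence table entries themselves,
 * then one or more patch sections, each preceded by an 8-byte section
 * header (u32 type, u32 patch size).
 */
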
struct microcode_header_amd {
	u32	data_code;
	u32	patch_id;
	u16	mc_patch_data_id;
	u8	mc_patch_data_len;
	u8	init_flag;
	u32	mc_patch_data_checksum;
	u32	nb_dev_id;
	u32	sb_dev_id;
	u16	processor_rev_id;
	u8	nb_rev_id;
	u8	sb_rev_id;
	u8	bios_api_rev;
	u8	reserved1[3];
	u32	match_reg[8];
} __packed;

struct microcode_amd {
	struct microcode_header_amd	hdr;
	unsigned int			mpb[];
};

static struct equiv_cpu_table {
	unsigned int num_entries;
	struct equiv_cpu_entry *entry;
} equiv_table;

union zen_patch_rev {
	struct {
		__u32 rev	 : 8,
		      stepping	 : 4,
		      model	 : 4,
		      __reserved : 4,
		      ext_model	 : 4,
		      ext_fam	 : 8;
	};
	__u32 ucode_rev;
};

union cpuid_1_eax {
	struct {
		__u32 stepping	  : 4,
		      model	  : 4,
		      family	  : 4,
		      __reserved0 : 4,
		      ext_model	  : 4,
		      ext_fam	  : 8,
		      __reserved1 : 4;
	};
	__u32 full;
};

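/*
 * Worked example (hypothetical patch ID): 0x0a201210 decodes per
 * union zen_patch_rev above as ext_fam=0x0a, ext_model=0x2, model=0x1,
 * stepping=0x2, rev=0x10, i.e. family 0xf + 0x0a = 0x19, model 0x21,
 * stepping 2 in CPUID(1).EAX terms.
 */
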
/*
 * This points to the current valid container of microcode patches which we will
 * save from the initrd/builtin before jettisoning its contents. @mc is the
 * microcode patch we found to match.
 */
struct cont_desc {
	struct microcode_amd *mc;
	u32		      psize;
	u8		      *data;
	size_t		      size;
};

/*
 * Microcode patch container file is prepended to the initrd in cpio
 * format. See Documentation/arch/x86/microcode.rst
 */
static const char
ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";

/*
 * This is CPUID(1).EAX on the BSP. It is used in two ways:
 *
 * 1. To ignore the equivalence table on Zen1 and newer.
 *
 * 2. To match which patches to load because the patch revision ID
 *    already contains the f/m/s for which the microcode is destined.
 */
static u32 bsp_cpuid_1_eax __ro_after_init;

static bool sha_check = true;

struct patch_digest {
	u32 patch_id;
	u8 sha256[SHA256_DIGEST_SIZE];
};

#include "amd_shas.c"

static int cmp_id(const void *key, const void *elem)
{
	struct patch_digest *pd = (struct patch_digest *)elem;
	u32 patch_id = *(u32 *)key;

	if (patch_id == pd->patch_id)
		return 0;
	else if (patch_id < pd->patch_id)
		return -1;
	else
		return 1;
}

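/*
 * Note: cmp_id() is the bsearch() comparator used against the phashes[]
 * array from amd_shas.c, which must therefore be sorted by ascending
 * patch_id.
 */
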
static u32 cpuid_to_ucode_rev(unsigned int val)
{
	union zen_patch_rev p = {};
	union cpuid_1_eax c;

	c.full = val;

	p.stepping  = c.stepping;
	p.model     = c.model;
	p.ext_model = c.ext_model;
	p.ext_fam   = c.ext_fam;

	return p.ucode_rev;
}

static bool need_sha_check(u32 cur_rev)
{
	if (!cur_rev) {
		cur_rev = cpuid_to_ucode_rev(bsp_cpuid_1_eax);
		pr_info_once("No current revision, generating the lowest one: 0x%x\n", cur_rev);
	}

	switch (cur_rev >> 8) {
	case 0x80012: return cur_rev <= 0x800126f; break;
	case 0x80082: return cur_rev <= 0x800820f; break;
	case 0x83010: return cur_rev <= 0x830107c; break;
	case 0x86001: return cur_rev <= 0x860010e; break;
	case 0x86081: return cur_rev <= 0x8608108; break;
	case 0x87010: return cur_rev <= 0x8701034; break;
	case 0x8a000: return cur_rev <= 0x8a0000a; break;
	case 0xa0010: return cur_rev <= 0xa00107a; break;
	case 0xa0011: return cur_rev <= 0xa0011da; break;
	case 0xa0012: return cur_rev <= 0xa001243; break;
	case 0xa0082: return cur_rev <= 0xa00820e; break;
	case 0xa1011: return cur_rev <= 0xa101153; break;
	case 0xa1012: return cur_rev <= 0xa10124e; break;
	case 0xa1081: return cur_rev <= 0xa108109; break;
	case 0xa2010: return cur_rev <= 0xa20102f; break;
	case 0xa2012: return cur_rev <= 0xa201212; break;
	case 0xa4041: return cur_rev <= 0xa404109; break;
	case 0xa5000: return cur_rev <= 0xa500013; break;
	case 0xa6012: return cur_rev <= 0xa60120a; break;
	case 0xa7041: return cur_rev <= 0xa704109; break;
	case 0xa7052: return cur_rev <= 0xa705208; break;
	case 0xa7080: return cur_rev <= 0xa708009; break;
	case 0xa70c0: return cur_rev <= 0xa70c009; break;
	case 0xaa001: return cur_rev <= 0xaa00116; break;
	case 0xaa002: return cur_rev <= 0xaa00218; break;
	case 0xb0021: return cur_rev <= 0xb002146; break;
	case 0xb1010: return cur_rev <= 0xb101046; break;
	case 0xb2040: return cur_rev <= 0xb204031; break;
	case 0xb4040: return cur_rev <= 0xb404031; break;
	case 0xb6000: return cur_rev <= 0xb600031; break;
	case 0xb7000: return cur_rev <= 0xb700031; break;
	default: break;
	}

	pr_info("You should not be seeing this. Please send the following couple of lines to x86-<at>-kernel.org\n");
	pr_info("CPUID(1).EAX: 0x%x, current revision: 0x%x\n", bsp_cpuid_1_eax, cur_rev);
	return true;
}

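/*
 * Note: each case above compares the current revision against a
 * per-series cutoff: revisions at or below the cutoff require a digest
 * check, newer ones return false and are not digest-checked here.
 */
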
static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsigned int len)
{
	struct patch_digest *pd = NULL;
	u8 digest[SHA256_DIGEST_SIZE];
	int i;

	if (x86_family(bsp_cpuid_1_eax) < 0x17)
		return true;

	if (!need_sha_check(cur_rev))
		return true;

	if (!sha_check)
		return true;

	pd = bsearch(&patch_id, phashes, ARRAY_SIZE(phashes), sizeof(struct patch_digest), cmp_id);
	if (!pd) {
		pr_err("No sha256 digest for patch ID: 0x%x found\n", patch_id);
		return false;
	}

	sha256(data, len, digest);

	if (memcmp(digest, pd->sha256, sizeof(digest))) {
		pr_err("Patch 0x%x SHA256 digest mismatch!\n", patch_id);

		for (i = 0; i < SHA256_DIGEST_SIZE; i++)
			pr_cont("0x%x ", digest[i]);
		pr_info("\n");

		return false;
	}

	return true;
}

static u32 get_patch_level(void)
{
	u32 rev, dummy __always_unused;

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

	return rev;
}

static union cpuid_1_eax ucode_rev_to_cpuid(unsigned int val)
{
	union zen_patch_rev p;
	union cpuid_1_eax c;

	p.ucode_rev = val;
	c.full = 0;

	c.stepping  = p.stepping;
	c.model     = p.model;
	c.ext_model = p.ext_model;
	c.family    = 0xf;
	c.ext_fam   = p.ext_fam;

	return c;
}

static u16 find_equiv_id(struct equiv_cpu_table *et, u32 sig)
{
	unsigned int i;

	/* Zen and newer do not need an equivalence table. */
	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
		return 0;

	if (!et || !et->num_entries)
		return 0;

	for (i = 0; i < et->num_entries; i++) {
		struct equiv_cpu_entry *e = &et->entry[i];

		if (sig == e->installed_cpu)
			return e->equiv_cpu;
	}
	return 0;
}

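/*
 * On pre-Zen parts, @installed_cpu holds the CPUID(1).EAX signature and
 * the returned equivalence ID is what patches carry in their
 * processor_rev_id field (see mc_patch_matches() below).
 */
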
/*
 * Check whether there is a valid microcode container file at the beginning
 * of @buf of size @buf_size.
 */
static bool verify_container(const u8 *buf, size_t buf_size)
{
	u32 cont_magic;

	if (buf_size <= CONTAINER_HDR_SZ) {
		pr_debug("Truncated microcode container header.\n");
		return false;
	}

	cont_magic = *(const u32 *)buf;
	if (cont_magic != UCODE_MAGIC) {
		pr_debug("Invalid magic value (0x%08x).\n", cont_magic);
		return false;
	}

	return true;
}

/*
 * Check whether there is a valid, non-truncated CPU equivalence table at the
 * beginning of @buf of size @buf_size.
 */
static bool verify_equivalence_table(const u8 *buf, size_t buf_size)
{
	const u32 *hdr = (const u32 *)buf;
	u32 cont_type, equiv_tbl_len;

	if (!verify_container(buf, buf_size))
		return false;

	/* Zen and newer do not need an equivalence table. */
	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
		return true;

	cont_type = hdr[1];
	if (cont_type != UCODE_EQUIV_CPU_TABLE_TYPE) {
		pr_debug("Wrong microcode container equivalence table type: %u.\n",
			 cont_type);
		return false;
	}

	buf_size -= CONTAINER_HDR_SZ;

	equiv_tbl_len = hdr[2];
	if (equiv_tbl_len < sizeof(struct equiv_cpu_entry) ||
	    buf_size < equiv_tbl_len) {
		pr_debug("Truncated equivalence table.\n");
		return false;
	}

	return true;
}

/*
 * Check whether there is a valid, non-truncated microcode patch section at the
 * beginning of @buf of size @buf_size.
 *
 * On success, the patch size as given by the section header is returned to
 * the caller in @sh_psize.
 */
static bool __verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize)
{
	u32 p_type, p_size;
	const u32 *hdr;

	if (buf_size < SECTION_HDR_SIZE) {
		pr_debug("Truncated patch section.\n");
		return false;
	}

	hdr = (const u32 *)buf;
	p_type = hdr[0];
	p_size = hdr[1];

	if (p_type != UCODE_UCODE_TYPE) {
		pr_debug("Invalid type field (0x%x) in container file section header.\n",
			 p_type);
		return false;
	}

	if (p_size < sizeof(struct microcode_header_amd)) {
		pr_debug("Patch of size %u too short.\n", p_size);
		return false;
	}

	*sh_psize = p_size;

	return true;
}

/*
 * Check whether the passed remaining file @buf_size is large enough to contain
 * a patch of the indicated @sh_psize (and also whether this size does not
 * exceed the per-family maximum). @sh_psize is the size read from the section
 * header.
 */
static bool __verify_patch_size(u32 sh_psize, size_t buf_size)
{
	u8 family = x86_family(bsp_cpuid_1_eax);
	u32 max_size;

	if (family >= 0x15)
		goto ret;

#define F1XH_MPB_MAX_SIZE 2048
#define F14H_MPB_MAX_SIZE 1824

	switch (family) {
	case 0x10 ... 0x12:
		max_size = F1XH_MPB_MAX_SIZE;
		break;
	case 0x14:
		max_size = F14H_MPB_MAX_SIZE;
		break;
	default:
		WARN(1, "%s: WTF family: 0x%x\n", __func__, family);
		return false;
	}

	if (sh_psize > max_size)
		return false;

ret:
	/* Working with the whole buffer so < is ok. */
	return sh_psize <= buf_size;
}

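/*
 * Note: families 0x15 and later have no fixed per-patch maximum here;
 * their family-specific firmware files may be larger than 2K (see the
 * naming convention comment further down).
 */
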
/*
 * Verify the patch in @buf.
 *
 * Returns:
 * negative: on error
 * positive: patch is not for this family, skip it
 * 0: success
 */
static int verify_patch(const u8 *buf, size_t buf_size, u32 *patch_size)
{
	u8 family = x86_family(bsp_cpuid_1_eax);
	struct microcode_header_amd *mc_hdr;
	u32 sh_psize;
	u16 proc_id;
	u8 patch_fam;

	if (!__verify_patch_section(buf, buf_size, &sh_psize))
		return -1;

	/*
	 * The section header length is not included in this indicated size
	 * but is present in the leftover file length so we need to subtract
	 * it before passing this value to the function below.
	 */
	buf_size -= SECTION_HDR_SIZE;

	/*
	 * Check if the remaining buffer is big enough to contain a patch of
	 * size sh_psize, as the section claims.
	 */
	if (buf_size < sh_psize) {
		pr_debug("Patch of size %u truncated.\n", sh_psize);
		return -1;
	}

	if (!__verify_patch_size(sh_psize, buf_size)) {
		pr_debug("Per-family patch size mismatch.\n");
		return -1;
	}

	*patch_size = sh_psize;

	mc_hdr = (struct microcode_header_amd *)(buf + SECTION_HDR_SIZE);
	if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
		pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr->patch_id);
		return -1;
	}

	proc_id   = mc_hdr->processor_rev_id;
	patch_fam = 0xf + (proc_id >> 12);
	if (patch_fam != family)
		return 1;

	return 0;
}

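/*
 * Example (hypothetical proc_id value): processor_rev_id 0x1022 yields
 * patch_fam = 0xf + (0x1022 >> 12) = 0x10, so such a patch would be
 * accepted only on family 0x10 parts.
 */
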
static bool mc_patch_matches(struct microcode_amd *mc, u16 eq_id)
{
	/* Zen and newer do not need an equivalence table. */
	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
		return ucode_rev_to_cpuid(mc->hdr.patch_id).full == bsp_cpuid_1_eax;
	else
		return eq_id == mc->hdr.processor_rev_id;
}

/*
 * This scans the ucode blob for the proper container as we can have multiple
 * containers glued together.
 *
 * Returns the number of bytes consumed while scanning. @desc contains all the
 * data we're going to use in later stages of the application.
 */
static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc)
{
	struct equiv_cpu_table table;
	size_t orig_size = size;
	u32 *hdr = (u32 *)ucode;
	u16 eq_id;
	u8 *buf;

	if (!verify_equivalence_table(ucode, size))
		return 0;

	buf = ucode;

	table.entry = (struct equiv_cpu_entry *)(buf + CONTAINER_HDR_SZ);
	table.num_entries = hdr[2] / sizeof(struct equiv_cpu_entry);

	/*
	 * Find the equivalence ID of our CPU in this table. Even if this table
	 * doesn't contain a patch for the CPU, scan through the whole container
	 * so that it can be skipped in case there are other containers appended.
	 */
	eq_id = find_equiv_id(&table, bsp_cpuid_1_eax);

	buf  += hdr[2] + CONTAINER_HDR_SZ;
	size -= hdr[2] + CONTAINER_HDR_SZ;

	/*
	 * Scan through the rest of the container to find where it ends. We do
	 * some basic sanity-checking too.
	 */
	while (size > 0) {
		struct microcode_amd *mc;
		u32 patch_size;
		int ret;

		ret = verify_patch(buf, size, &patch_size);
		if (ret < 0) {
			/*
			 * Patch verification failed, skip to the next container, if
			 * there is one. Before exit, check whether that container has
			 * found a patch already. If so, use it.
			 */
			goto out;
		} else if (ret > 0) {
			goto skip;
		}

		mc = (struct microcode_amd *)(buf + SECTION_HDR_SIZE);
		if (mc_patch_matches(mc, eq_id)) {
			desc->psize = patch_size;
			desc->mc = mc;
		}

skip:
		/* Skip patch section header too: */
		buf  += patch_size + SECTION_HDR_SIZE;
		size -= patch_size + SECTION_HDR_SIZE;
	}

out:
	/*
	 * If we have found a patch (desc->mc), it means we're looking at the
	 * container which has a patch for this CPU, so return 0 to signal that
	 * @ucode already points to the proper container. Otherwise, return the
	 * size we scanned so that we can advance to the next container in the
	 * buffer.
	 */
	if (desc->mc) {
		desc->data = ucode;
		desc->size = orig_size - size;

		return 0;
	}

	return orig_size - size;
}

/*
 * Scan the ucode blob for the proper container as we can have multiple
 * containers glued together.
 */
static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
{
	while (size) {
		size_t s = parse_container(ucode, size, desc);
		if (!s)
			return;

		/* catch wraparound */
		if (size >= s) {
			ucode += s;
			size  -= s;
		} else {
			return;
		}
	}
}

static bool __apply_microcode_amd(struct microcode_amd *mc, u32 *cur_rev,
				  unsigned int psize)
{
	unsigned long p_addr = (unsigned long)&mc->hdr.data_code;

	if (!verify_sha256_digest(mc->hdr.patch_id, *cur_rev, (const u8 *)p_addr, psize))
		return false;

	native_wrmsrq(MSR_AMD64_PATCH_LOADER, p_addr);

	if (x86_family(bsp_cpuid_1_eax) == 0x17) {
		unsigned long p_addr_end = p_addr + psize - 1;

		invlpg(p_addr);

		/*
		 * Flush next page too if patch image is crossing a page
		 * boundary.
		 */
		if (p_addr >> PAGE_SHIFT != p_addr_end >> PAGE_SHIFT)
			invlpg(p_addr_end);
	}

	/* verify patch application was successful */
	*cur_rev = get_patch_level();
	if (*cur_rev != mc->hdr.patch_id)
		return false;

	return true;
}

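/*
 * Application mechanics, restated: the patch image address is written to
 * MSR_AMD64_PATCH_LOADER, and success is confirmed by reading the new
 * revision back from MSR_AMD64_PATCH_LEVEL via get_patch_level().
 */
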
static bool get_builtin_microcode(struct cpio_data *cp)
{
	char fw_name[36] = "amd-ucode/microcode_amd.bin";
	u8 family = x86_family(bsp_cpuid_1_eax);
	struct firmware fw;

	if (IS_ENABLED(CONFIG_X86_32))
		return false;

	if (family >= 0x15)
		snprintf(fw_name, sizeof(fw_name),
			 "amd-ucode/microcode_amd_fam%02hhxh.bin", family);

	if (firmware_request_builtin(&fw, fw_name)) {
		cp->size = fw.size;
		cp->data = (void *)fw.data;
		return true;
	}

	return false;
}

static bool __init find_blobs_in_containers(struct cpio_data *ret)
{
	struct cpio_data cp;
	bool found;

	if (!get_builtin_microcode(&cp))
		cp = find_microcode_in_initrd(ucode_path);

	found = cp.data && cp.size;
	if (found)
		*ret = cp;

	return found;
}

/*
 * Early load occurs before we can vmalloc(). So we look for the microcode
 * patch container file in initrd, traverse the equivalent CPU table, look
 * for a matching microcode patch, and update, all in initrd memory in place.
 * When vmalloc() is available for use later -- on 64-bit during first AP load,
 * and on 32-bit during save_microcode_in_initrd() -- we can call
 * load_microcode_amd() to save the equivalent CPU table and microcode patches
 * in kernel heap memory.
 */
void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_eax)
{
	struct cont_desc desc = { };
	struct microcode_amd *mc;
	struct cpio_data cp = { };
	char buf[4];
	u32 rev;

	if (cmdline_find_option(boot_command_line, "microcode.amd_sha_check", buf, 4)) {
		if (!strncmp(buf, "off", 3)) {
			sha_check = false;
			pr_warn_once("It is a very very bad idea to disable the blobs SHA check!\n");
			add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
		}
	}

	bsp_cpuid_1_eax = cpuid_1_eax;

	rev = get_patch_level();
	ed->old_rev = rev;

	/* Needed in load_microcode_amd() */
	ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax;

	if (!find_blobs_in_containers(&cp))
		return;

	scan_containers(cp.data, cp.size, &desc);

	mc = desc.mc;
	if (!mc)
		return;

	/*
	 * Allow application of the same revision to pick up SMT-specific
	 * changes even if the revision of the other SMT thread is already
	 * up-to-date.
	 */
	if (ed->old_rev > mc->hdr.patch_id)
		return;

	if (__apply_microcode_amd(mc, &rev, desc.psize))
		ed->new_rev = rev;
}

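/*
 * Usage note: booting with "microcode.amd_sha_check=off" takes the branch
 * above, disabling digest verification and tainting the kernel with
 * TAINT_CPU_OUT_OF_SPEC.
 */
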
static inline bool patch_cpus_equivalent(struct ucode_patch *p,
					 struct ucode_patch *n,
					 bool ignore_stepping)
{
	/* Zen and newer hardcode the f/m/s in the patch ID */
	if (x86_family(bsp_cpuid_1_eax) >= 0x17) {
		union cpuid_1_eax p_cid = ucode_rev_to_cpuid(p->patch_id);
		union cpuid_1_eax n_cid = ucode_rev_to_cpuid(n->patch_id);

		if (ignore_stepping) {
			p_cid.stepping = 0;
			n_cid.stepping = 0;
		}

		return p_cid.full == n_cid.full;
	} else {
		return p->equiv_cpu == n->equiv_cpu;
	}
}

/*
 * a small, trivial cache of per-family ucode patches
 */
static struct ucode_patch *cache_find_patch(struct ucode_cpu_info *uci, u16 equiv_cpu)
{
	struct ucode_patch *p;
	struct ucode_patch n;

	n.equiv_cpu = equiv_cpu;
	n.patch_id  = uci->cpu_sig.rev;

	list_for_each_entry(p, &microcode_cache, plist)
		if (patch_cpus_equivalent(p, &n, false))
			return p;

	return NULL;
}

static inline int patch_newer(struct ucode_patch *p, struct ucode_patch *n)
{
	/* Zen and newer hardcode the f/m/s in the patch ID */
	if (x86_family(bsp_cpuid_1_eax) >= 0x17) {
		union zen_patch_rev zp, zn;

		zp.ucode_rev = p->patch_id;
		zn.ucode_rev = n->patch_id;

		if (zn.stepping != zp.stepping)
			return -1;

		return zn.rev > zp.rev;
	} else {
		return n->patch_id > p->patch_id;
	}
}

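/*
 * Return convention, restated: -1 when the steppings differ (the caller
 * keeps looking), 0 when @n is not newer (the cached patch wins), 1 when
 * @n is newer and should replace @p in update_cache() below.
 */
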
static void update_cache(struct ucode_patch *new_patch)
{
	struct ucode_patch *p;
	int ret;

	list_for_each_entry(p, &microcode_cache, plist) {
		if (patch_cpus_equivalent(p, new_patch, true)) {
			ret = patch_newer(p, new_patch);
			if (ret < 0)
				continue;
			else if (!ret) {
				/* we already have the latest patch */
				kfree(new_patch->data);
				kfree(new_patch);
				return;
			}

			list_replace(&p->plist, &new_patch->plist);
			kfree(p->data);
			kfree(p);
			return;
		}
	}
	/* no patch found, add it */
	list_add_tail(&new_patch->plist, &microcode_cache);
}

static void free_cache(void)
{
	struct ucode_patch *p, *tmp;

	list_for_each_entry_safe(p, tmp, &microcode_cache, plist) {
		__list_del(p->plist.prev, p->plist.next);
		kfree(p->data);
		kfree(p);
	}
}

static struct ucode_patch *find_patch(unsigned int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	u16 equiv_id = 0;

	uci->cpu_sig.rev = get_patch_level();

	if (x86_family(bsp_cpuid_1_eax) < 0x17) {
		equiv_id = find_equiv_id(&equiv_table, uci->cpu_sig.sig);
		if (!equiv_id)
			return NULL;
	}

	return cache_find_patch(uci, equiv_id);
}

void reload_ucode_amd(unsigned int cpu)
{
	u32 rev, dummy __always_unused;
	struct microcode_amd *mc;
	struct ucode_patch *p;

	p = find_patch(cpu);
	if (!p)
		return;

	mc = p->data;

	rev = get_patch_level();
	if (rev < mc->hdr.patch_id) {
		if (__apply_microcode_amd(mc, &rev, p->size))
			pr_info_once("reload revision: 0x%08x\n", rev);
	}
}

static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct ucode_patch *p;

	csig->sig = cpuid_eax(0x00000001);
	csig->rev = get_patch_level();

	/*
	 * a patch could have been loaded early, set uci->mc so that
	 * mc_bp_resume() can call apply_microcode()
	 */
	p = find_patch(cpu);
	if (p && (p->patch_id == csig->rev))
		uci->mc = p->data;

	return 0;
}

static enum ucode_state apply_microcode_amd(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct microcode_amd *mc_amd;
	struct ucode_cpu_info *uci;
	struct ucode_patch *p;
	enum ucode_state ret;
	u32 rev;

	BUG_ON(raw_smp_processor_id() != cpu);

	uci = ucode_cpu_info + cpu;

	p = find_patch(cpu);
	if (!p)
		return UCODE_NFOUND;

	rev = uci->cpu_sig.rev;

	mc_amd  = p->data;
	uci->mc = p->data;

	/* need to apply patch? */
	if (rev > mc_amd->hdr.patch_id) {
		ret = UCODE_OK;
		goto out;
	}

	if (!__apply_microcode_amd(mc_amd, &rev, p->size)) {
		pr_err("CPU%d: update failed for patch_level=0x%08x\n",
			cpu, mc_amd->hdr.patch_id);
		return UCODE_ERROR;
	}

	rev = mc_amd->hdr.patch_id;
	ret = UCODE_UPDATED;

out:
	uci->cpu_sig.rev = rev;
	c->microcode	 = rev;

	/* Update boot_cpu_data's revision too, if we're on the BSP: */
	if (c->cpu_index == boot_cpu_data.cpu_index)
		boot_cpu_data.microcode = rev;

	return ret;
}

937
void load_ucode_amd_ap(unsigned int cpuid_1_eax)
938
{
939
unsigned int cpu = smp_processor_id();
940
941
ucode_cpu_info[cpu].cpu_sig.sig = cpuid_1_eax;
942
apply_microcode_amd(cpu);
943
}
944
static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size)
{
	u32 equiv_tbl_len;
	const u32 *hdr;

	if (!verify_equivalence_table(buf, buf_size))
		return 0;

	hdr = (const u32 *)buf;
	equiv_tbl_len = hdr[2];

	/* Zen and newer do not need an equivalence table. */
	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
		goto out;

	equiv_table.entry = vmalloc(equiv_tbl_len);
	if (!equiv_table.entry) {
		pr_err("failed to allocate equivalent CPU table\n");
		return 0;
	}

	memcpy(equiv_table.entry, buf + CONTAINER_HDR_SZ, equiv_tbl_len);
	equiv_table.num_entries = equiv_tbl_len / sizeof(struct equiv_cpu_entry);

out:
	/* add header length */
	return equiv_tbl_len + CONTAINER_HDR_SZ;
}

static void free_equiv_cpu_table(void)
{
	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
		return;

	vfree(equiv_table.entry);
	memset(&equiv_table, 0, sizeof(equiv_table));
}

static void cleanup(void)
{
	free_equiv_cpu_table();
	free_cache();
}

/*
 * Return a non-negative value even if some of the checks failed so that
 * we can skip over the next patch. If we return a negative value, we
 * signal a grave error, such as a failed memory allocation, and that the
 * driver cannot continue functioning normally. In such cases, we tear
 * down everything we've used up so far and exit.
 */
static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
				unsigned int *patch_size)
{
	struct microcode_header_amd *mc_hdr;
	struct ucode_patch *patch;
	u16 proc_id;
	int ret;

	ret = verify_patch(fw, leftover, patch_size);
	if (ret)
		return ret;

	patch = kzalloc(sizeof(*patch), GFP_KERNEL);
	if (!patch) {
		pr_err("Patch allocation failure.\n");
		return -EINVAL;
	}

	patch->data = kmemdup(fw + SECTION_HDR_SIZE, *patch_size, GFP_KERNEL);
	if (!patch->data) {
		pr_err("Patch data allocation failure.\n");
		kfree(patch);
		return -EINVAL;
	}
	patch->size = *patch_size;

	mc_hdr  = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
	proc_id = mc_hdr->processor_rev_id;

	INIT_LIST_HEAD(&patch->plist);
	patch->patch_id  = mc_hdr->patch_id;
	patch->equiv_cpu = proc_id;

	pr_debug("%s: Adding patch_id: 0x%08x, proc_id: 0x%04x\n",
		 __func__, patch->patch_id, proc_id);

	/* ... and add to cache. */
	update_cache(patch);

	return 0;
}

/* Scan the blob in @data and add microcode patches to the cache. */
static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, size_t size)
{
	u8 *fw = (u8 *)data;
	size_t offset;

	offset = install_equiv_cpu_table(data, size);
	if (!offset)
		return UCODE_ERROR;

	fw   += offset;
	size -= offset;

	if (*(u32 *)fw != UCODE_UCODE_TYPE) {
		pr_err("invalid type field in container file section header\n");
		free_equiv_cpu_table();
		return UCODE_ERROR;
	}

	while (size > 0) {
		unsigned int crnt_size = 0;
		int ret;

		ret = verify_and_add_patch(family, fw, size, &crnt_size);
		if (ret < 0)
			return UCODE_ERROR;

		fw   += crnt_size + SECTION_HDR_SIZE;
		size -= (crnt_size + SECTION_HDR_SIZE);
	}

	return UCODE_OK;
}

static enum ucode_state _load_microcode_amd(u8 family, const u8 *data, size_t size)
{
	enum ucode_state ret;

	/* free old equiv table */
	free_equiv_cpu_table();

	ret = __load_microcode_amd(family, data, size);
	if (ret != UCODE_OK)
		cleanup();

	return ret;
}

static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
{
	struct cpuinfo_x86 *c;
	unsigned int nid, cpu;
	struct ucode_patch *p;
	enum ucode_state ret;

	ret = _load_microcode_amd(family, data, size);
	if (ret != UCODE_OK)
		return ret;

	for_each_node_with_cpus(nid) {
		cpu = cpumask_first(cpumask_of_node(nid));
		c = &cpu_data(cpu);

		p = find_patch(cpu);
		if (!p)
			continue;

		if (c->microcode >= p->patch_id)
			continue;

		ret = UCODE_NEW;
	}

	return ret;
}

static int __init save_microcode_in_initrd(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	struct cont_desc desc = { 0 };
	unsigned int cpuid_1_eax;
	enum ucode_state ret;
	struct cpio_data cp;

	if (microcode_loader_disabled() || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
		return 0;

	cpuid_1_eax = native_cpuid_eax(1);

	if (!find_blobs_in_containers(&cp))
		return -EINVAL;

	scan_containers(cp.data, cp.size, &desc);
	if (!desc.mc)
		return -EINVAL;

	ret = _load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
	if (ret > UCODE_UPDATED)
		return -EINVAL;

	return 0;
}
early_initcall(save_microcode_in_initrd);

/*
 * AMD microcode firmware naming convention, up to family 15h they are in
 * the legacy file:
 *
 *    amd-ucode/microcode_amd.bin
 *
 * This legacy file is always smaller than 2K in size.
 *
 * Beginning with family 15h, they are in family-specific firmware files:
 *
 *    amd-ucode/microcode_amd_fam15h.bin
 *    amd-ucode/microcode_amd_fam16h.bin
 *    ...
 *
 * These might be larger than 2K.
 */
static enum ucode_state request_microcode_amd(int cpu, struct device *device)
{
	char fw_name[36] = "amd-ucode/microcode_amd.bin";
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	enum ucode_state ret = UCODE_NFOUND;
	const struct firmware *fw;

	if (force_minrev)
		return UCODE_NFOUND;

	if (c->x86 >= 0x15)
		snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);

	if (request_firmware_direct(&fw, (const char *)fw_name, device)) {
		pr_debug("failed to load file %s\n", fw_name);
		goto out;
	}

	ret = UCODE_ERROR;
	if (!verify_container(fw->data, fw->size))
		goto fw_release;

	ret = load_microcode_amd(c->x86, fw->data, fw->size);

 fw_release:
	release_firmware(fw);

 out:
	return ret;
}

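/*
 * Example: on a family 0x19 CPU, the snprintf() above produces
 * "amd-ucode/microcode_amd_fam19h.bin" as the requested firmware name.
 */
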
static void microcode_fini_cpu_amd(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	uci->mc = NULL;
}

static void finalize_late_load_amd(int result)
{
	if (result)
		cleanup();
}

static struct microcode_ops microcode_amd_ops = {
	.request_microcode_fw	= request_microcode_amd,
	.collect_cpu_info	= collect_cpu_info_amd,
	.apply_microcode	= apply_microcode_amd,
	.microcode_fini_cpu	= microcode_fini_cpu_amd,
	.finalize_late_load	= finalize_late_load_amd,
	.nmi_safe		= true,
};

struct microcode_ops * __init init_amd_microcode(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
		pr_warn("AMD CPU family 0x%x not supported\n", c->x86);
		return NULL;
	}
	return &microcode_amd_ops;
}

void __exit exit_amd_microcode(void)
{
	cleanup();
}