// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Intel CPU Microcode Update Driver for Linux
 *
 * Copyright (C) 2000-2006 Tigran Aivazian <[email protected]>
 *		      2006 Shaohua Li <[email protected]>
 *
 * Intel CPU microcode early update for Linux
 *
 * Copyright (C) 2012 Fenghua Yu <[email protected]>
 *		      H Peter Anvin <[email protected]>
 */
#define pr_fmt(fmt) "microcode: " fmt

#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/pci_ids.h>
#include <linux/uaccess.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/uio.h>
#include <linux/io.h>
#include <linux/mm.h>

#include <asm/cpu_device_id.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>
#include <asm/msr.h>

#include "internal.h"

static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";

#define UCODE_BSP_LOADED	((struct microcode_intel *)0x1UL)

/* Defines for the microcode staging mailbox interface */
#define MBOX_REG_NUM		4
#define MBOX_REG_SIZE		sizeof(u32)

#define MBOX_CONTROL_OFFSET	0x0
#define MBOX_STATUS_OFFSET	0x4
#define MBOX_WRDATA_OFFSET	0x8
#define MBOX_RDDATA_OFFSET	0xc

#define MASK_MBOX_CTRL_ABORT	BIT(0)
#define MASK_MBOX_CTRL_GO	BIT(31)

#define MASK_MBOX_STATUS_ERROR	BIT(2)
#define MASK_MBOX_STATUS_READY	BIT(31)

#define MASK_MBOX_RESP_SUCCESS	BIT(0)
#define MASK_MBOX_RESP_PROGRESS	BIT(1)
#define MASK_MBOX_RESP_ERROR	BIT(2)

#define MBOX_CMD_LOAD		0x3
#define MBOX_OBJ_STAGING	0xb
#define MBOX_HEADER(size)	((PCI_VENDOR_ID_INTEL) | \
				 (MBOX_OBJ_STAGING << 16) | \
				 ((u64)((size) / sizeof(u32)) << 32))
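/*
 * Example: MBOX_HEADER(16) encodes the Intel vendor ID 0x8086 in bits 15:0,
 * the staging object type 0xb in bits 31:16 and the payload length in
 * dwords (16 / 4 = 4) in bits 63:32.
 */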

/* The size of each mailbox header */
#define MBOX_HEADER_SIZE	sizeof(u64)
/* The size of staging hardware response */
#define MBOX_RESPONSE_SIZE	sizeof(u64)

#define MBOX_XACTION_TIMEOUT_MS	(10 * MSEC_PER_SEC)

/* Current microcode patch used in early patching on the APs. */
static struct microcode_intel *ucode_patch_va __read_mostly;
static struct microcode_intel *ucode_patch_late __read_mostly;

/* last level cache size per core */
static unsigned int llc_size_per_core __ro_after_init;

/* microcode format is extended from prescott processors */
struct extended_signature {
	unsigned int	sig;
	unsigned int	pf;
	unsigned int	cksum;
};

struct extended_sigtable {
	unsigned int			count;
	unsigned int			cksum;
	unsigned int			reserved[3];
	struct extended_signature	sigs[];
};

/**
 * struct staging_state - Track the current staging process state
 *
 * @mmio_base:	MMIO base address for staging
 * @ucode_len:	Total size of the microcode image
 * @chunk_size:	Size of each data piece
 * @bytes_sent:	Total bytes transmitted so far
 * @offset:	Current offset in the microcode image
 */
struct staging_state {
	void __iomem	*mmio_base;
	unsigned int	ucode_len;
	unsigned int	chunk_size;
	unsigned int	bytes_sent;
	unsigned int	offset;
};

#define DEFAULT_UCODE_TOTALSIZE	(DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE)
#define EXT_HEADER_SIZE		(sizeof(struct extended_sigtable))
#define EXT_SIGNATURE_SIZE	(sizeof(struct extended_signature))

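/*
 * A zero datasize marks the original fixed-size update format; the
 * totalsize field is only valid when datasize is non-zero.
 */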
static inline unsigned int get_totalsize(struct microcode_header_intel *hdr)
{
	return hdr->datasize ? hdr->totalsize : DEFAULT_UCODE_TOTALSIZE;
}

static inline unsigned int exttable_size(struct extended_sigtable *et)
{
	return et->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE;
}

void intel_collect_cpu_info(struct cpu_signature *sig)
{
	sig->sig = cpuid_eax(1);
	sig->pf = 0;
	sig->rev = intel_get_microcode_revision();

	if (IFM(x86_family(sig->sig), x86_model(sig->sig)) >= INTEL_PENTIUM_III_DESCHUTES) {
		unsigned int val[2];

		/* get processor flags from MSR 0x17 */
		native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
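		/* PLATFORM_ID bits [52:50] select one of eight platform flag bits */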
		sig->pf = 1 << ((val[1] >> 18) & 7);
	}
}
EXPORT_SYMBOL_GPL(intel_collect_cpu_info);

static inline bool cpu_signatures_match(struct cpu_signature *s1, unsigned int sig2,
					unsigned int pf2)
{
	if (s1->sig != sig2)
		return false;

	/* Processor flags are either both 0 or they intersect. */
	return ((!s1->pf && !pf2) || (s1->pf & pf2));
}

bool intel_find_matching_signature(void *mc, struct cpu_signature *sig)
{
	struct microcode_header_intel *mc_hdr = mc;
	struct extended_signature *ext_sig;
	struct extended_sigtable *ext_hdr;
	int i;

	if (cpu_signatures_match(sig, mc_hdr->sig, mc_hdr->pf))
		return true;

	/* Look for ext. headers: */
	if (get_totalsize(mc_hdr) <= intel_microcode_get_datasize(mc_hdr) + MC_HEADER_SIZE)
		return false;

	ext_hdr = mc + intel_microcode_get_datasize(mc_hdr) + MC_HEADER_SIZE;
	ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE;

	for (i = 0; i < ext_hdr->count; i++) {
		if (cpu_signatures_match(sig, ext_sig->sig, ext_sig->pf))
			return true;
		ext_sig++;
	}
	return false;
}
EXPORT_SYMBOL_GPL(intel_find_matching_signature);

/**
 * intel_microcode_sanity_check() - Sanity check microcode file.
 * @mc: Pointer to the microcode file contents.
 * @print_err: Display failure reason if true, silent if false.
 * @hdr_type: Type of file, i.e. normal microcode file or In Field Scan file.
 *            Validate if the microcode header type matches with the type
 *            specified here.
 *
 * Validate certain header fields and verify if computed checksum matches
 * with the one specified in the header.
 *
 * Return: 0 if the file passes all the checks, -EINVAL if any of the checks
 * fail.
 */
int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type)
{
	unsigned long total_size, data_size, ext_table_size;
	struct microcode_header_intel *mc_header = mc;
	struct extended_sigtable *ext_header = NULL;
	u32 sum, orig_sum, ext_sigcount = 0, i;
	struct extended_signature *ext_sig;

	total_size = get_totalsize(mc_header);
	data_size = intel_microcode_get_datasize(mc_header);

	if (data_size + MC_HEADER_SIZE > total_size) {
		if (print_err)
			pr_err("Error: bad microcode data file size.\n");
		return -EINVAL;
	}

	if (mc_header->ldrver != 1 || mc_header->hdrver != hdr_type) {
		if (print_err)
			pr_err("Error: invalid/unknown microcode update format. Header type %d\n",
			       mc_header->hdrver);
		return -EINVAL;
	}

	ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
	if (ext_table_size) {
		u32 ext_table_sum = 0;
		u32 *ext_tablep;

		if (ext_table_size < EXT_HEADER_SIZE ||
		    ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) {
			if (print_err)
				pr_err("Error: truncated extended signature table.\n");
			return -EINVAL;
		}

		ext_header = mc + MC_HEADER_SIZE + data_size;
		if (ext_table_size != exttable_size(ext_header)) {
			if (print_err)
				pr_err("Error: extended signature table size mismatch.\n");
			return -EFAULT;
		}

		ext_sigcount = ext_header->count;

		/*
		 * Check extended table checksum: the sum of all dwords that
		 * comprise a valid table must be 0.
		 */
		ext_tablep = (u32 *)ext_header;

		i = ext_table_size / sizeof(u32);
		while (i--)
			ext_table_sum += ext_tablep[i];

		if (ext_table_sum) {
			if (print_err)
				pr_warn("Bad extended signature table checksum, aborting.\n");
			return -EINVAL;
		}
	}

	/*
	 * Calculate the checksum of update data and header. The checksum of
	 * valid update data and header including the extended signature table
	 * must be 0.
	 */
	orig_sum = 0;
	i = (MC_HEADER_SIZE + data_size) / sizeof(u32);
	while (i--)
		orig_sum += ((u32 *)mc)[i];

	if (orig_sum) {
		if (print_err)
			pr_err("Bad microcode data checksum, aborting.\n");
		return -EINVAL;
	}

	if (!ext_table_size)
		return 0;

	/*
	 * Check extended signature checksum: 0 => valid.
	 */
	for (i = 0; i < ext_sigcount; i++) {
		ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
			  EXT_SIGNATURE_SIZE * i;

		sum = (mc_header->sig + mc_header->pf + mc_header->cksum) -
		      (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
		if (sum) {
			if (print_err)
				pr_err("Bad extended signature checksum, aborting.\n");
			return -EINVAL;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(intel_microcode_sanity_check);

static void update_ucode_pointer(struct microcode_intel *mc)
{
	kvfree(ucode_patch_va);

	/*
	 * Save the virtual address for early loading and for eventual free
	 * on late loading.
	 */
	ucode_patch_va = mc;
}

static void save_microcode_patch(struct microcode_intel *patch)
{
	unsigned int size = get_totalsize(&patch->hdr);
	struct microcode_intel *mc;

	mc = kvmemdup(patch, size, GFP_KERNEL);
	if (mc)
		update_ucode_pointer(mc);
	else
		pr_err("Unable to allocate microcode memory size: %u\n", size);
}

/* Scan blob for microcode matching the boot CPU's family, model, stepping */
static __init struct microcode_intel *scan_microcode(void *data, size_t size,
						     struct ucode_cpu_info *uci,
						     bool save)
{
	struct microcode_header_intel *mc_header;
	struct microcode_intel *patch = NULL;
	u32 cur_rev = uci->cpu_sig.rev;
	unsigned int mc_size;

	for (; size >= sizeof(struct microcode_header_intel); size -= mc_size, data += mc_size) {
		mc_header = (struct microcode_header_intel *)data;

		mc_size = get_totalsize(mc_header);
		if (!mc_size || mc_size > size ||
		    intel_microcode_sanity_check(data, false, MC_HEADER_TYPE_MICROCODE) < 0)
			break;

		if (!intel_find_matching_signature(data, &uci->cpu_sig))
			continue;

		/*
		 * For saving the early microcode, find the matching revision which
		 * was loaded on the BSP.
		 *
		 * On the BSP during early boot, find a newer revision than
		 * actually loaded in the CPU.
		 */
		if (save) {
			if (cur_rev != mc_header->rev)
				continue;
		} else if (cur_rev >= mc_header->rev) {
			continue;
		}

		patch = data;
		cur_rev = mc_header->rev;
	}

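	/* A non-zero remainder means the blob was not fully consumed (malformed or truncated entry), so discard any candidate. */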
	return size ? NULL : patch;
}

static inline u32 read_mbox_dword(void __iomem *mmio_base)
{
	u32 dword = readl(mmio_base + MBOX_RDDATA_OFFSET);

	/* Acknowledge read completion to the staging hardware */
	writel(0, mmio_base + MBOX_RDDATA_OFFSET);
	return dword;
}

static inline void write_mbox_dword(void __iomem *mmio_base, u32 dword)
{
	writel(dword, mmio_base + MBOX_WRDATA_OFFSET);
}

static inline u64 read_mbox_header(void __iomem *mmio_base)
{
	u32 high, low;

	low = read_mbox_dword(mmio_base);
	high = read_mbox_dword(mmio_base);

	return ((u64)high << 32) | low;
}

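/* The 64-bit header goes out as two dwords, low half first, matching read_mbox_header(). */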
static inline void write_mbox_header(void __iomem *mmio_base, u64 value)
{
	write_mbox_dword(mmio_base, value);
	write_mbox_dword(mmio_base, value >> 32);
}

static void write_mbox_data(void __iomem *mmio_base, u32 *chunk, unsigned int chunk_bytes)
{
	int i;

	/*
	 * The MMIO space is mapped as Uncached (UC). Each write arrives
	 * at the device as an individual transaction in program order.
	 * The device can then reassemble the sequence accordingly.
	 */
	for (i = 0; i < chunk_bytes / sizeof(u32); i++)
		write_mbox_dword(mmio_base, chunk[i]);
}

/*
 * Prepare for a new microcode transfer: reset hardware and record the
 * image size.
 */
static void init_stage(struct staging_state *ss)
{
	ss->ucode_len = get_totalsize(&ucode_patch_late->hdr);

	/*
	 * Abort any ongoing process, effectively resetting the device.
	 * Unlike regular mailbox data processing requests, this
	 * operation does not require a status check.
	 */
	writel(MASK_MBOX_CTRL_ABORT, ss->mmio_base + MBOX_CONTROL_OFFSET);
}

/*
 * Update the chunk size and decide whether another chunk can be sent.
 * This accounts for remaining data and retry limits.
 */
static bool can_send_next_chunk(struct staging_state *ss, int *err)
{
	/* A page size or remaining bytes if this is the final chunk */
	ss->chunk_size = min(PAGE_SIZE, ss->ucode_len - ss->offset);

	/*
	 * Each microcode image is divided into chunks, each at most
	 * one page size. A 10-chunk image would typically require 10
	 * transactions.
	 *
	 * However, the hardware managing the mailbox has limited
	 * resources and may not cache the entire image, potentially
	 * requesting the same chunk multiple times.
	 *
	 * To tolerate this behavior, allow up to twice the expected
	 * number of transactions (i.e., a 10-chunk image can take up to
	 * 20 attempts).
	 *
	 * If the number of attempts exceeds this limit, treat it as
	 * exceeding the maximum allowed transfer size.
	 */
	if (ss->bytes_sent + ss->chunk_size > ss->ucode_len * 2) {
		*err = -EMSGSIZE;
		return false;
	}

	*err = 0;
	return true;
}

/*
 * The hardware indicates completion by returning a sentinel end offset.
 */
static inline bool is_end_offset(u32 offset)
{
	return offset == UINT_MAX;
}

/*
 * Determine whether staging is complete: either the hardware signaled
 * the end offset, or no more transactions are permitted (retry limit
 * reached).
 */
static inline bool staging_is_complete(struct staging_state *ss, int *err)
{
	return is_end_offset(ss->offset) || !can_send_next_chunk(ss, err);
}

/*
 * Wait for the hardware to complete a transaction.
 * Return 0 on success, or an error code on failure.
 */
static int wait_for_transaction(struct staging_state *ss)
{
	u32 timeout, status;

	/* Allow time for hardware to complete the operation: */
	for (timeout = 0; timeout < MBOX_XACTION_TIMEOUT_MS; timeout++) {
		msleep(1);

		status = readl(ss->mmio_base + MBOX_STATUS_OFFSET);
		/* Break out early if the hardware is ready: */
		if (status & MASK_MBOX_STATUS_READY)
			break;
	}

	/* Check for explicit error response */
	if (status & MASK_MBOX_STATUS_ERROR)
		return -EIO;

	/*
	 * Hardware has neither responded to the action nor signaled any
	 * error. Treat this as a timeout.
	 */
	if (!(status & MASK_MBOX_STATUS_READY))
		return -ETIMEDOUT;

	return 0;
}

/*
 * Transmit a chunk of the microcode image to the hardware.
 * Return 0 on success, or an error code on failure.
 */
static int send_data_chunk(struct staging_state *ss, void *ucode_ptr)
{
	u32 *src_chunk = ucode_ptr + ss->offset;
	u16 mbox_size;

	/*
	 * Write a 'request' mailbox object in this order:
	 * 1. Mailbox header includes total size
	 * 2. Command header specifies the load operation
	 * 3. Data section contains a microcode chunk
	 *
	 * Thus, the mailbox size is two headers plus the chunk size.
	 */
	mbox_size = MBOX_HEADER_SIZE * 2 + ss->chunk_size;
	write_mbox_header(ss->mmio_base, MBOX_HEADER(mbox_size));
	write_mbox_header(ss->mmio_base, MBOX_CMD_LOAD);
	write_mbox_data(ss->mmio_base, src_chunk, ss->chunk_size);
	ss->bytes_sent += ss->chunk_size;

	/* Notify the hardware that the mailbox is ready for processing. */
	writel(MASK_MBOX_CTRL_GO, ss->mmio_base + MBOX_CONTROL_OFFSET);

	return wait_for_transaction(ss);
}

/*
 * Retrieve the next offset from the hardware response.
 * Return 0 on success, or an error code on failure.
 */
static int fetch_next_offset(struct staging_state *ss)
{
	const u64 expected_header = MBOX_HEADER(MBOX_HEADER_SIZE + MBOX_RESPONSE_SIZE);
	u32 offset, status;
	u64 header;

	/*
	 * The 'response' mailbox returns three fields, in order:
	 * 1. Header
	 * 2. Next offset in the microcode image
	 * 3. Status flags
	 */
	header = read_mbox_header(ss->mmio_base);
	offset = read_mbox_dword(ss->mmio_base);
	status = read_mbox_dword(ss->mmio_base);

	/* All valid responses must start with the expected header. */
	if (header != expected_header) {
		pr_err_once("staging: invalid response header (0x%llx)\n", header);
		return -EBADR;
	}

	/*
	 * Verify the offset: If not at the end marker, it must not
	 * exceed the microcode image length.
	 */
	if (!is_end_offset(offset) && offset > ss->ucode_len) {
		pr_err_once("staging: invalid offset (%u) past the image end (%u)\n",
			    offset, ss->ucode_len);
		return -EINVAL;
	}

	/* Hardware may report errors explicitly in the status field */
	if (status & MASK_MBOX_RESP_ERROR)
		return -EPROTO;

	ss->offset = offset;
	return 0;
}

/*
 * Handle the staging process using the mailbox MMIO interface. The
 * microcode image is transferred in chunks until completion.
 * Return 0 on success or an error code on failure.
 */
static int do_stage(u64 mmio_pa)
{
	struct staging_state ss = {};
	int err;

	ss.mmio_base = ioremap(mmio_pa, MBOX_REG_NUM * MBOX_REG_SIZE);
	if (WARN_ON_ONCE(!ss.mmio_base))
		return -EADDRNOTAVAIL;

	init_stage(&ss);

	/* Perform the staging process while within the retry limit */
	while (!staging_is_complete(&ss, &err)) {
		/* Send a chunk of microcode each time: */
		err = send_data_chunk(&ss, ucode_patch_late);
		if (err)
			break;
		/*
		 * Then, ask the hardware which piece of the image it
		 * needs next. The same piece may be sent more than once.
		 */
		err = fetch_next_offset(&ss);
		if (err)
			break;
	}

	iounmap(ss.mmio_base);

	return err;
}

static void stage_microcode(void)
{
	unsigned int pkg_id = UINT_MAX;
	int cpu, err;
	u64 mmio_pa;

	if (!IS_ALIGNED(get_totalsize(&ucode_patch_late->hdr), sizeof(u32))) {
		pr_err("Microcode image 32-bit misaligned (0x%x), staging failed.\n",
		       get_totalsize(&ucode_patch_late->hdr));
		return;
	}

	lockdep_assert_cpus_held();

	/*
	 * The MMIO address is unique per package, and all the SMT
	 * primary threads are online here. Find each MMIO space by
	 * their package IDs to avoid duplicate staging.
	 */
	for_each_cpu(cpu, cpu_primary_thread_mask) {
		if (topology_logical_package_id(cpu) == pkg_id)
			continue;

		pkg_id = topology_logical_package_id(cpu);

		err = rdmsrq_on_cpu(cpu, MSR_IA32_MCU_STAGING_MBOX_ADDR, &mmio_pa);
		if (WARN_ON_ONCE(err))
			return;

		err = do_stage(mmio_pa);
		if (err) {
			pr_err("Error: staging failed (%d) for CPU%d at package %u.\n",
			       err, cpu, pkg_id);
			return;
		}
	}

	pr_info("Staging of patch revision 0x%x succeeded.\n", ucode_patch_late->hdr.rev);
}

static enum ucode_state __apply_microcode(struct ucode_cpu_info *uci,
					  struct microcode_intel *mc,
					  u32 *cur_rev)
{
	u32 rev;

	if (!mc)
		return UCODE_NFOUND;

	/*
	 * Save us the MSR write below - which is a particularly expensive
	 * operation - when the other hyperthread has updated the microcode
	 * already.
	 */
	*cur_rev = intel_get_microcode_revision();
	if (*cur_rev >= mc->hdr.rev) {
		uci->cpu_sig.rev = *cur_rev;
		return UCODE_OK;
	}

	/* write microcode via MSR 0x79 */
	native_wrmsrq(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);

	rev = intel_get_microcode_revision();
	if (rev != mc->hdr.rev)
		return UCODE_ERROR;

	uci->cpu_sig.rev = rev;
	return UCODE_UPDATED;
}

static enum ucode_state apply_microcode_early(struct ucode_cpu_info *uci)
{
	struct microcode_intel *mc = uci->mc;
	u32 cur_rev;

	return __apply_microcode(uci, mc, &cur_rev);
}

static __init bool load_builtin_intel_microcode(struct cpio_data *cp)
{
	unsigned int eax = 1, ebx, ecx = 0, edx;
	struct firmware fw;
	char name[30];

	if (IS_ENABLED(CONFIG_X86_32))
		return false;

	native_cpuid(&eax, &ebx, &ecx, &edx);

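	/* e.g. family 6, model 0x55, stepping 4 -> "intel-ucode/06-55-04" */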
	sprintf(name, "intel-ucode/%02x-%02x-%02x",
		x86_family(eax), x86_model(eax), x86_stepping(eax));

	if (firmware_request_builtin(&fw, name)) {
		cp->size = fw.size;
		cp->data = (void *)fw.data;
		return true;
	}
	return false;
}

static __init struct microcode_intel *get_microcode_blob(struct ucode_cpu_info *uci, bool save)
{
	struct cpio_data cp;

	intel_collect_cpu_info(&uci->cpu_sig);

	if (!load_builtin_intel_microcode(&cp))
		cp = find_microcode_in_initrd(ucode_path);

	if (!(cp.data && cp.size))
		return NULL;

	return scan_microcode(cp.data, cp.size, uci, save);
}

/*
 * Invoked from an early init call to save the microcode blob which was
 * selected during early boot when mm was not usable. The microcode must be
 * saved because initrd is going away. It's an early init call so the APs
 * can just use the pointer and do not have to scan initrd/builtin firmware
 * again.
 */
static int __init save_builtin_microcode(void)
{
	struct ucode_cpu_info uci;

	if (xchg(&ucode_patch_va, NULL) != UCODE_BSP_LOADED)
		return 0;

	if (microcode_loader_disabled() || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 0;

	uci.mc = get_microcode_blob(&uci, true);
	if (uci.mc)
		save_microcode_patch(uci.mc);
	return 0;
}
early_initcall(save_builtin_microcode);

/* Load microcode on BSP from initrd or builtin blobs */
void __init load_ucode_intel_bsp(struct early_load_data *ed)
{
	struct ucode_cpu_info uci;

	uci.mc = get_microcode_blob(&uci, false);
	ed->old_rev = uci.cpu_sig.rev;

	if (uci.mc && apply_microcode_early(&uci) == UCODE_UPDATED) {
		ucode_patch_va = UCODE_BSP_LOADED;
		ed->new_rev = uci.cpu_sig.rev;
	}
}

void load_ucode_intel_ap(void)
{
	struct ucode_cpu_info uci;

	uci.mc = ucode_patch_va;
	if (uci.mc)
		apply_microcode_early(&uci);
}

/* Reload microcode on resume */
void reload_ucode_intel(void)
{
	struct ucode_cpu_info uci = { .mc = ucode_patch_va, };

	if (uci.mc)
		apply_microcode_early(&uci);
}

static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
{
	intel_collect_cpu_info(csig);
	return 0;
}

static enum ucode_state apply_microcode_late(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct microcode_intel *mc = ucode_patch_late;
	enum ucode_state ret;
	u32 cur_rev;

	if (WARN_ON_ONCE(smp_processor_id() != cpu))
		return UCODE_ERROR;

	ret = __apply_microcode(uci, mc, &cur_rev);
	if (ret != UCODE_UPDATED && ret != UCODE_OK)
		return ret;

	cpu_data(cpu).microcode = uci->cpu_sig.rev;
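	/* CPU 0 is the boot CPU: keep boot_cpu_data's revision in sync */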
	if (!cpu)
		boot_cpu_data.microcode = uci->cpu_sig.rev;

	return ret;
}

static bool ucode_validate_minrev(struct microcode_header_intel *mc_header)
{
	int cur_rev = boot_cpu_data.microcode;

	/*
	 * When late-loading, ensure the header declares a minimum revision
	 * required to perform a late-load. The previously reserved field
	 * is 0 in older microcode blobs.
	 */
	if (!mc_header->min_req_ver) {
		pr_info("Unsafe microcode update: Microcode header does not specify a required min version\n");
		return false;
	}

	/*
	 * Check whether the current revision is greater than or equal to
	 * the minimum revision specified in the header.
	 */
	if (cur_rev < mc_header->min_req_ver) {
		pr_info("Unsafe microcode update: Current revision 0x%x too old\n", cur_rev);
		pr_info("Current should be at 0x%x or higher. Use early loading instead\n", mc_header->min_req_ver);
		return false;
	}
	return true;
}

static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	bool is_safe, new_is_safe = false;
	int cur_rev = uci->cpu_sig.rev;
	unsigned int curr_mc_size = 0;
	u8 *new_mc = NULL, *mc = NULL;

	while (iov_iter_count(iter)) {
		struct microcode_header_intel mc_header;
		unsigned int mc_size, data_size;
		u8 *data;

		if (!copy_from_iter_full(&mc_header, sizeof(mc_header), iter)) {
			pr_err("error! Truncated or inaccessible header in microcode data file\n");
			goto fail;
		}

		mc_size = get_totalsize(&mc_header);
		if (mc_size < sizeof(mc_header)) {
			pr_err("error! Bad data in microcode data file (totalsize too small)\n");
			goto fail;
		}
		data_size = mc_size - sizeof(mc_header);
		if (data_size > iov_iter_count(iter)) {
			pr_err("error! Bad data in microcode data file (truncated file?)\n");
			goto fail;
		}

		/* For performance reasons, reuse mc area when possible */
		if (!mc || mc_size > curr_mc_size) {
			kvfree(mc);
			mc = kvmalloc(mc_size, GFP_KERNEL);
			if (!mc)
				goto fail;
			curr_mc_size = mc_size;
		}

		memcpy(mc, &mc_header, sizeof(mc_header));
		data = mc + sizeof(mc_header);
		if (!copy_from_iter_full(data, data_size, iter) ||
		    intel_microcode_sanity_check(mc, true, MC_HEADER_TYPE_MICROCODE) < 0)
			goto fail;

		if (cur_rev >= mc_header.rev)
			continue;

		if (!intel_find_matching_signature(mc, &uci->cpu_sig))
			continue;

		is_safe = ucode_validate_minrev(&mc_header);
		if (force_minrev && !is_safe)
			continue;

		kvfree(new_mc);
		cur_rev = mc_header.rev;
		new_mc = mc;
		new_is_safe = is_safe;
		mc = NULL;
	}

	if (iov_iter_count(iter))
		goto fail;

	kvfree(mc);
	if (!new_mc)
		return UCODE_NFOUND;

	ucode_patch_late = (struct microcode_intel *)new_mc;
	return new_is_safe ? UCODE_NEW_SAFE : UCODE_NEW;

fail:
	kvfree(mc);
	kvfree(new_mc);
	return UCODE_ERROR;
}

static bool is_blacklisted(unsigned int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	/*
	 * Late loading on model 79 with microcode revision less than 0x0b000021
	 * and LLC size per core bigger than 2.5MB may result in a system hang.
	 * This behavior is documented in item BDF90, #334165 (Intel Xeon
	 * Processor E7-8800/4800 v4 Product Family).
	 */
	if (c->x86_vfm == INTEL_BROADWELL_X &&
	    c->x86_stepping == 0x01 &&
	    llc_size_per_core > 2621440 &&
	    c->microcode < 0x0b000021) {
		pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
		pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
		return true;
	}

	return false;
}

static enum ucode_state request_microcode_fw(int cpu, struct device *device)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	const struct firmware *firmware;
	struct iov_iter iter;
	enum ucode_state ret;
	struct kvec kvec;
	char name[30];

	if (is_blacklisted(cpu))
		return UCODE_NFOUND;

	sprintf(name, "intel-ucode/%02x-%02x-%02x",
		c->x86, c->x86_model, c->x86_stepping);

	if (request_firmware_direct(&firmware, name, device)) {
		pr_debug("data file %s load failed\n", name);
		return UCODE_NFOUND;
	}

	kvec.iov_base = (void *)firmware->data;
	kvec.iov_len = firmware->size;
	iov_iter_kvec(&iter, ITER_SOURCE, &kvec, 1, firmware->size);
	ret = parse_microcode_blobs(cpu, &iter);

	release_firmware(firmware);

	return ret;
}

static void finalize_late_load(int result)
{
	if (!result)
		update_ucode_pointer(ucode_patch_late);
	else
		kvfree(ucode_patch_late);
	ucode_patch_late = NULL;
}

static struct microcode_ops microcode_intel_ops = {
	.request_microcode_fw	= request_microcode_fw,
	.collect_cpu_info	= collect_cpu_info,
	.apply_microcode	= apply_microcode_late,
	.finalize_late_load	= finalize_late_load,
	.stage_microcode	= stage_microcode,
	.use_nmi		= IS_ENABLED(CONFIG_X86_64),
};

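/* x86_cache_size is in KB; convert to bytes before dividing by the core count. */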
static __init void calc_llc_size_per_core(struct cpuinfo_x86 *c)
{
	u64 llc_size = c->x86_cache_size * 1024ULL;

	do_div(llc_size, topology_num_cores_per_package());
	llc_size_per_core = (unsigned int)llc_size;
}

static __init bool staging_available(void)
{
	u64 val;

	val = x86_read_arch_cap_msr();
	if (!(val & ARCH_CAP_MCU_ENUM))
		return false;

	rdmsrq(MSR_IA32_MCU_ENUMERATION, val);
	return !!(val & MCU_STAGING);
}

struct microcode_ops * __init init_intel_microcode(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
	    cpu_has(c, X86_FEATURE_IA64)) {
		pr_err("Intel CPU family 0x%x not supported\n", c->x86);
		return NULL;
	}

	if (staging_available()) {
		microcode_intel_ops.use_staging = true;
		pr_info("Enabled staging feature.\n");
	}

	calc_llc_size_per_core(c);

	return &microcode_intel_ops;
}