Path: arch/x86/kernel/cpu/microcode/intel.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Intel CPU Microcode Update Driver for Linux
 *
 * Copyright (C) 2000-2006 Tigran Aivazian <[email protected]>
 *                    2006 Shaohua Li <[email protected]>
 *
 * Intel CPU microcode early update for Linux
 *
 * Copyright (C) 2012 Fenghua Yu <[email protected]>
 *                    H Peter Anvin <[email protected]>
 */
#define pr_fmt(fmt) "microcode: " fmt
#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/pci_ids.h>
#include <linux/uaccess.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/uio.h>
#include <linux/io.h>
#include <linux/mm.h>

#include <asm/cpu_device_id.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>
#include <asm/msr.h>

#include "internal.h"

static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";

#define UCODE_BSP_LOADED        ((struct microcode_intel *)0x1UL)

/* Defines for the microcode staging mailbox interface */
#define MBOX_REG_NUM            4
#define MBOX_REG_SIZE           sizeof(u32)

#define MBOX_CONTROL_OFFSET     0x0
#define MBOX_STATUS_OFFSET      0x4
#define MBOX_WRDATA_OFFSET      0x8
#define MBOX_RDDATA_OFFSET      0xc

#define MASK_MBOX_CTRL_ABORT    BIT(0)
#define MASK_MBOX_CTRL_GO       BIT(31)

#define MASK_MBOX_STATUS_ERROR  BIT(2)
#define MASK_MBOX_STATUS_READY  BIT(31)

#define MASK_MBOX_RESP_SUCCESS  BIT(0)
#define MASK_MBOX_RESP_PROGRESS BIT(1)
#define MASK_MBOX_RESP_ERROR    BIT(2)

#define MBOX_CMD_LOAD           0x3
#define MBOX_OBJ_STAGING        0xb
#define MBOX_HEADER(size)       ((PCI_VENDOR_ID_INTEL) | \
                                 (MBOX_OBJ_STAGING << 16) | \
                                 ((u64)((size) / sizeof(u32)) << 32))

/* The size of each mailbox header */
#define MBOX_HEADER_SIZE        sizeof(u64)
/* The size of the staging hardware response */
#define MBOX_RESPONSE_SIZE      sizeof(u64)

#define MBOX_XACTION_TIMEOUT_MS (10 * MSEC_PER_SEC)
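/*
 * Worked example of the header encoding above (illustrative only): a
 * mailbox object carrying 16 payload bytes packs the size in dwords
 * (16 / 4 = 4) into bits 63:32, the staging object type 0xb into bits
 * 31:16 and the Intel PCI vendor ID 0x8086 into bits 15:0:
 *
 *      MBOX_HEADER(16) == ((u64)4 << 32) | (0xb << 16) | 0x8086
 *                      == 0x00000004000b8086
 */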
/* Current microcode patch used in early patching on the APs. */
static struct microcode_intel *ucode_patch_va __read_mostly;
static struct microcode_intel *ucode_patch_late __read_mostly;

/* last level cache size per core */
static unsigned int llc_size_per_core __ro_after_init;

/* microcode format is extended from prescott processors */
struct extended_signature {
        unsigned int sig;
        unsigned int pf;
        unsigned int cksum;
};

struct extended_sigtable {
        unsigned int count;
        unsigned int cksum;
        unsigned int reserved[3];
        struct extended_signature sigs[];
};

/**
 * struct staging_state - Track the current staging process state
 *
 * @mmio_base:  MMIO base address for staging
 * @ucode_len:  Total size of the microcode image
 * @chunk_size: Size of each data piece
 * @bytes_sent: Total bytes transmitted so far
 * @offset:     Current offset in the microcode image
 */
struct staging_state {
        void __iomem *mmio_base;
        unsigned int ucode_len;
        unsigned int chunk_size;
        unsigned int bytes_sent;
        unsigned int offset;
};

#define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE)
#define EXT_HEADER_SIZE         (sizeof(struct extended_sigtable))
#define EXT_SIGNATURE_SIZE      (sizeof(struct extended_signature))

static inline unsigned int get_totalsize(struct microcode_header_intel *hdr)
{
        return hdr->datasize ? hdr->totalsize : DEFAULT_UCODE_TOTALSIZE;
}

static inline unsigned int exttable_size(struct extended_sigtable *et)
{
        return et->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE;
}

void intel_collect_cpu_info(struct cpu_signature *sig)
{
        sig->sig = cpuid_eax(1);
        sig->pf = 0;
        sig->rev = intel_get_microcode_revision();

        if (IFM(x86_family(sig->sig), x86_model(sig->sig)) >= INTEL_PENTIUM_III_DESCHUTES) {
                unsigned int val[2];

                /* get processor flags from MSR 0x17 */
                native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
                sig->pf = 1 << ((val[1] >> 18) & 7);
        }
}
EXPORT_SYMBOL_GPL(intel_collect_cpu_info);

static inline bool cpu_signatures_match(struct cpu_signature *s1, unsigned int sig2,
                                        unsigned int pf2)
{
        if (s1->sig != sig2)
                return false;

        /* Processor flags are either both 0 or they intersect. */
        return ((!s1->pf && !pf2) || (s1->pf & pf2));
}
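/*
 * Example: a patch carrying pf == 0x12 (platforms 1 and 4) matches a
 * CPU whose pf is 0x10, since the masks intersect, but not one whose
 * pf is 0x01. Two zero masks also match, which covers CPUs predating
 * MSR_IA32_PLATFORM_ID where intel_collect_cpu_info() leaves pf == 0.
 */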
bool intel_find_matching_signature(void *mc, struct cpu_signature *sig)
{
        struct microcode_header_intel *mc_hdr = mc;
        struct extended_signature *ext_sig;
        struct extended_sigtable *ext_hdr;
        int i;

        if (cpu_signatures_match(sig, mc_hdr->sig, mc_hdr->pf))
                return true;

        /* Look for ext. headers: */
        if (get_totalsize(mc_hdr) <= intel_microcode_get_datasize(mc_hdr) + MC_HEADER_SIZE)
                return false;

        ext_hdr = mc + intel_microcode_get_datasize(mc_hdr) + MC_HEADER_SIZE;
        ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE;

        for (i = 0; i < ext_hdr->count; i++) {
                if (cpu_signatures_match(sig, ext_sig->sig, ext_sig->pf))
                        return true;
                ext_sig++;
        }
        return false;
}
EXPORT_SYMBOL_GPL(intel_find_matching_signature);

/**
 * intel_microcode_sanity_check() - Sanity check microcode file.
 * @mc: Pointer to the microcode file contents.
 * @print_err: Display failure reason if true, silent if false.
 * @hdr_type: Type of file, i.e. normal microcode file or In Field Scan file.
 *            Validate if the microcode header type matches with the type
 *            specified here.
 *
 * Validate certain header fields and verify if computed checksum matches
 * with the one specified in the header.
 *
 * Return: 0 if the file passes all the checks, -EINVAL if any of the checks
 * fail.
 */
int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type)
{
        unsigned long total_size, data_size, ext_table_size;
        struct microcode_header_intel *mc_header = mc;
        struct extended_sigtable *ext_header = NULL;
        u32 sum, orig_sum, ext_sigcount = 0, i;
        struct extended_signature *ext_sig;

        total_size = get_totalsize(mc_header);
        data_size = intel_microcode_get_datasize(mc_header);

        if (data_size + MC_HEADER_SIZE > total_size) {
                if (print_err)
                        pr_err("Error: bad microcode data file size.\n");
                return -EINVAL;
        }

        if (mc_header->ldrver != 1 || mc_header->hdrver != hdr_type) {
                if (print_err)
                        pr_err("Error: invalid/unknown microcode update format. Header type %d\n",
                               mc_header->hdrver);
                return -EINVAL;
        }

        ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
        if (ext_table_size) {
                u32 ext_table_sum = 0;
                u32 *ext_tablep;

                if (ext_table_size < EXT_HEADER_SIZE ||
                    ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) {
                        if (print_err)
                                pr_err("Error: truncated extended signature table.\n");
                        return -EINVAL;
                }

                ext_header = mc + MC_HEADER_SIZE + data_size;
                if (ext_table_size != exttable_size(ext_header)) {
                        if (print_err)
                                pr_err("Error: extended signature table size mismatch.\n");
                        return -EFAULT;
                }

                ext_sigcount = ext_header->count;

                /*
                 * Check extended table checksum: the sum of all dwords that
                 * comprise a valid table must be 0.
                 */
                ext_tablep = (u32 *)ext_header;

                i = ext_table_size / sizeof(u32);
                while (i--)
                        ext_table_sum += ext_tablep[i];

                if (ext_table_sum) {
                        if (print_err)
                                pr_warn("Bad extended signature table checksum, aborting.\n");
                        return -EINVAL;
                }
        }

        /*
         * Calculate the checksum of update data and header. The checksum of
         * valid update data and header including the extended signature table
         * must be 0.
         */
        orig_sum = 0;
        i = (MC_HEADER_SIZE + data_size) / sizeof(u32);
        while (i--)
                orig_sum += ((u32 *)mc)[i];

        if (orig_sum) {
                if (print_err)
                        pr_err("Bad microcode data checksum, aborting.\n");
                return -EINVAL;
        }

        if (!ext_table_size)
                return 0;

        /*
         * Check extended signature checksum: 0 => valid.
         */
        for (i = 0; i < ext_sigcount; i++) {
                ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
                          EXT_SIGNATURE_SIZE * i;

                sum = (mc_header->sig + mc_header->pf + mc_header->cksum) -
                      (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
                if (sum) {
                        if (print_err)
                                pr_err("Bad extended signature checksum, aborting.\n");
                        return -EINVAL;
                }
        }
        return 0;
}
EXPORT_SYMBOL_GPL(intel_microcode_sanity_check);
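/*
 * The checksum invariant checked above, spelled out: every dword of a
 * valid update (header, data and any extended signature table) must
 * sum to 0 in 32-bit arithmetic, i.e. each cksum field is the two's
 * complement of the sum of all other dwords it covers. The per-entry
 * extended signature check is the same property in disguise: swapping
 * the primary header's sig/pf/cksum triplet for an extended entry's
 * triplet must keep the total at 0, so the two triplet sums must be
 * equal.
 */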
static void update_ucode_pointer(struct microcode_intel *mc)
{
        kvfree(ucode_patch_va);

        /*
         * Save the virtual address for early loading and for eventual free
         * on late loading.
         */
        ucode_patch_va = mc;
}

static void save_microcode_patch(struct microcode_intel *patch)
{
        unsigned int size = get_totalsize(&patch->hdr);
        struct microcode_intel *mc;

        mc = kvmemdup(patch, size, GFP_KERNEL);
        if (mc)
                update_ucode_pointer(mc);
        else
                pr_err("Unable to allocate microcode memory size: %u\n", size);
}

/* Scan blob for microcode matching the boot CPU's family, model, stepping */
static __init struct microcode_intel *scan_microcode(void *data, size_t size,
                                                     struct ucode_cpu_info *uci,
                                                     bool save)
{
        struct microcode_header_intel *mc_header;
        struct microcode_intel *patch = NULL;
        u32 cur_rev = uci->cpu_sig.rev;
        unsigned int mc_size;

        for (; size >= sizeof(struct microcode_header_intel); size -= mc_size, data += mc_size) {
                mc_header = (struct microcode_header_intel *)data;

                mc_size = get_totalsize(mc_header);
                if (!mc_size || mc_size > size ||
                    intel_microcode_sanity_check(data, false, MC_HEADER_TYPE_MICROCODE) < 0)
                        break;

                if (!intel_find_matching_signature(data, &uci->cpu_sig))
                        continue;

                /*
                 * For saving the early microcode, find the matching revision which
                 * was loaded on the BSP.
                 *
                 * On the BSP during early boot, find a newer revision than
                 * actually loaded in the CPU.
                 */
                if (save) {
                        if (cur_rev != mc_header->rev)
                                continue;
                } else if (cur_rev >= mc_header->rev) {
                        continue;
                }

                patch = data;
                cur_rev = mc_header->rev;
        }

        return size ? NULL : patch;
}

static inline u32 read_mbox_dword(void __iomem *mmio_base)
{
        u32 dword = readl(mmio_base + MBOX_RDDATA_OFFSET);

        /* Acknowledge read completion to the staging hardware */
        writel(0, mmio_base + MBOX_RDDATA_OFFSET);
        return dword;
}

static inline void write_mbox_dword(void __iomem *mmio_base, u32 dword)
{
        writel(dword, mmio_base + MBOX_WRDATA_OFFSET);
}

static inline u64 read_mbox_header(void __iomem *mmio_base)
{
        u32 high, low;

        low = read_mbox_dword(mmio_base);
        high = read_mbox_dword(mmio_base);

        return ((u64)high << 32) | low;
}

static inline void write_mbox_header(void __iomem *mmio_base, u64 value)
{
        write_mbox_dword(mmio_base, value);
        write_mbox_dword(mmio_base, value >> 32);
}

static void write_mbox_data(void __iomem *mmio_base, u32 *chunk, unsigned int chunk_bytes)
{
        int i;

        /*
         * The MMIO space is mapped as Uncached (UC). Each write arrives
         * at the device as an individual transaction in program order.
         * The device can then reassemble the sequence accordingly.
         */
        for (i = 0; i < chunk_bytes / sizeof(u32); i++)
                write_mbox_dword(mmio_base, chunk[i]);
}
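/*
 * Example: write_mbox_header(base, 0x00000004000b8086) issues two
 * dword writes to MBOX_WRDATA_OFFSET, low half (0x000b8086) first,
 * then high half (0x00000004). read_mbox_header() reassembles a qword
 * from two dword reads in the same low-then-high order, so both
 * directions agree on dword ordering without any extra swapping.
 */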
/*
 * Prepare for a new microcode transfer: reset hardware and record the
 * image size.
 */
static void init_stage(struct staging_state *ss)
{
        ss->ucode_len = get_totalsize(&ucode_patch_late->hdr);

        /*
         * Abort any ongoing process, effectively resetting the device.
         * Unlike regular mailbox data processing requests, this
         * operation does not require a status check.
         */
        writel(MASK_MBOX_CTRL_ABORT, ss->mmio_base + MBOX_CONTROL_OFFSET);
}

/*
 * Update the chunk size and decide whether another chunk can be sent.
 * This accounts for remaining data and retry limits.
 */
static bool can_send_next_chunk(struct staging_state *ss, int *err)
{
        /* A page size or remaining bytes if this is the final chunk */
        ss->chunk_size = min(PAGE_SIZE, ss->ucode_len - ss->offset);

        /*
         * Each microcode image is divided into chunks, each at most
         * one page size. A 10-chunk image would typically require 10
         * transactions.
         *
         * However, the hardware managing the mailbox has limited
         * resources and may not cache the entire image, potentially
         * requesting the same chunk multiple times.
         *
         * To tolerate this behavior, allow up to twice the expected
         * number of transactions (i.e., a 10-chunk image can take up to
         * 20 attempts).
         *
         * If the number of attempts exceeds this limit, treat it as
         * exceeding the maximum allowed transfer size.
         */
        if (ss->bytes_sent + ss->chunk_size > ss->ucode_len * 2) {
                *err = -EMSGSIZE;
                return false;
        }

        *err = 0;
        return true;
}

/*
 * The hardware indicates completion by returning a sentinel end offset.
 */
static inline bool is_end_offset(u32 offset)
{
        return offset == UINT_MAX;
}

/*
 * Determine whether staging is complete: either the hardware signaled
 * the end offset, or no more transactions are permitted (retry limit
 * reached).
 */
static inline bool staging_is_complete(struct staging_state *ss, int *err)
{
        return is_end_offset(ss->offset) || !can_send_next_chunk(ss, err);
}
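/*
 * Example of the retry budget above: a 40 KiB image sent in 4 KiB
 * chunks needs 10 transactions in the ideal case. bytes_sent may grow
 * up to twice the image size (80 KiB, i.e. 20 transactions) before
 * can_send_next_chunk() fails with -EMSGSIZE, so the hardware
 * re-requesting an already-sent chunk consumes budget but is not by
 * itself an error.
 */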
/*
 * Wait for the hardware to complete a transaction.
 * Return 0 on success, or an error code on failure.
 */
static int wait_for_transaction(struct staging_state *ss)
{
        u32 timeout, status;

        /* Allow time for hardware to complete the operation: */
        for (timeout = 0; timeout < MBOX_XACTION_TIMEOUT_MS; timeout++) {
                msleep(1);

                status = readl(ss->mmio_base + MBOX_STATUS_OFFSET);
                /* Break out early if the hardware is ready: */
                if (status & MASK_MBOX_STATUS_READY)
                        break;
        }

        /* Check for explicit error response */
        if (status & MASK_MBOX_STATUS_ERROR)
                return -EIO;

        /*
         * Hardware has neither responded to the action nor signaled any
         * error. Treat this as a timeout.
         */
        if (!(status & MASK_MBOX_STATUS_READY))
                return -ETIMEDOUT;

        return 0;
}

/*
 * Transmit a chunk of the microcode image to the hardware.
 * Return 0 on success, or an error code on failure.
 */
static int send_data_chunk(struct staging_state *ss, void *ucode_ptr)
{
        u32 *src_chunk = ucode_ptr + ss->offset;
        u16 mbox_size;

        /*
         * Write a 'request' mailbox object in this order:
         * 1. Mailbox header includes total size
         * 2. Command header specifies the load operation
         * 3. Data section contains a microcode chunk
         *
         * Thus, the mailbox size is two headers plus the chunk size.
         */
        mbox_size = MBOX_HEADER_SIZE * 2 + ss->chunk_size;
        write_mbox_header(ss->mmio_base, MBOX_HEADER(mbox_size));
        write_mbox_header(ss->mmio_base, MBOX_CMD_LOAD);
        write_mbox_data(ss->mmio_base, src_chunk, ss->chunk_size);
        ss->bytes_sent += ss->chunk_size;

        /* Notify the hardware that the mailbox is ready for processing. */
        writel(MASK_MBOX_CTRL_GO, ss->mmio_base + MBOX_CONTROL_OFFSET);

        return wait_for_transaction(ss);
}

/*
 * Retrieve the next offset from the hardware response.
 * Return 0 on success, or an error code on failure.
 */
static int fetch_next_offset(struct staging_state *ss)
{
        const u64 expected_header = MBOX_HEADER(MBOX_HEADER_SIZE + MBOX_RESPONSE_SIZE);
        u32 offset, status;
        u64 header;

        /*
         * The 'response' mailbox returns three fields, in order:
         * 1. Header
         * 2. Next offset in the microcode image
         * 3. Status flags
         */
        header = read_mbox_header(ss->mmio_base);
        offset = read_mbox_dword(ss->mmio_base);
        status = read_mbox_dword(ss->mmio_base);

        /* All valid responses must start with the expected header. */
        if (header != expected_header) {
                pr_err_once("staging: invalid response header (0x%llx)\n", header);
                return -EBADR;
        }

        /*
         * Verify the offset: If not at the end marker, it must not
         * exceed the microcode image length.
         */
        if (!is_end_offset(offset) && offset > ss->ucode_len) {
                pr_err_once("staging: invalid offset (%u) past the image end (%u)\n",
                            offset, ss->ucode_len);
                return -EINVAL;
        }

        /* Hardware may report errors explicitly in the status field */
        if (status & MASK_MBOX_RESP_ERROR)
                return -EPROTO;

        ss->offset = offset;
        return 0;
}
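/*
 * Example of one complete transaction, for a full 4 KiB chunk:
 * mbox_size = 2 * 8 + 4096 = 4112 bytes, so send_data_chunk() writes
 * MBOX_HEADER(4112) == 0x00000404000b8086 (4112 / 4 = 0x404 dwords),
 * the MBOX_CMD_LOAD qword and 1024 data dwords, then sets the GO bit.
 * fetch_next_offset() then reads back header + next offset + status,
 * where an offset of UINT_MAX signals completion.
 */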
/*
 * Handle the staging process using the mailbox MMIO interface. The
 * microcode image is transferred in chunks until completion.
 * Return 0 on success or an error code on failure.
 */
static int do_stage(u64 mmio_pa)
{
        struct staging_state ss = {};
        int err;

        ss.mmio_base = ioremap(mmio_pa, MBOX_REG_NUM * MBOX_REG_SIZE);
        if (WARN_ON_ONCE(!ss.mmio_base))
                return -EADDRNOTAVAIL;

        init_stage(&ss);

        /* Perform the staging process while within the retry limit */
        while (!staging_is_complete(&ss, &err)) {
                /* Send a chunk of microcode each time: */
                err = send_data_chunk(&ss, ucode_patch_late);
                if (err)
                        break;
                /*
                 * Then, ask the hardware which piece of the image it
                 * needs next. The same piece may be sent more than once.
                 */
                err = fetch_next_offset(&ss);
                if (err)
                        break;
        }

        iounmap(ss.mmio_base);

        return err;
}

static void stage_microcode(void)
{
        unsigned int pkg_id = UINT_MAX;
        int cpu, err;
        u64 mmio_pa;

        if (!IS_ALIGNED(get_totalsize(&ucode_patch_late->hdr), sizeof(u32))) {
                pr_err("Microcode image 32-bit misaligned (0x%x), staging failed.\n",
                       get_totalsize(&ucode_patch_late->hdr));
                return;
        }

        lockdep_assert_cpus_held();

        /*
         * The MMIO address is unique per package, and all the SMT
         * primary threads are online here. Find each MMIO space by
         * its package ID to avoid duplicate staging.
         */
        for_each_cpu(cpu, cpu_primary_thread_mask) {
                if (topology_logical_package_id(cpu) == pkg_id)
                        continue;

                pkg_id = topology_logical_package_id(cpu);

                err = rdmsrq_on_cpu(cpu, MSR_IA32_MCU_STAGING_MBOX_ADDR, &mmio_pa);
                if (WARN_ON_ONCE(err))
                        return;

                err = do_stage(mmio_pa);
                if (err) {
                        pr_err("Error: staging failed (%d) for CPU%d at package %u.\n",
                               err, cpu, pkg_id);
                        return;
                }
        }

        pr_info("Staging of patch revision 0x%x succeeded.\n", ucode_patch_late->hdr.rev);
}

static enum ucode_state __apply_microcode(struct ucode_cpu_info *uci,
                                          struct microcode_intel *mc,
                                          u32 *cur_rev)
{
        u32 rev;

        if (!mc)
                return UCODE_NFOUND;

        /*
         * Save us the MSR write below - which is a particularly expensive
         * operation - when the other hyperthread has updated the microcode
         * already.
         */
        *cur_rev = intel_get_microcode_revision();
        if (*cur_rev >= mc->hdr.rev) {
                uci->cpu_sig.rev = *cur_rev;
                return UCODE_OK;
        }

        /* write microcode via MSR 0x79 */
        native_wrmsrq(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);

        rev = intel_get_microcode_revision();
        if (rev != mc->hdr.rev)
                return UCODE_ERROR;

        uci->cpu_sig.rev = rev;
        return UCODE_UPDATED;
}
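/*
 * Example: on a hyperthread whose sibling already loaded revision
 * 0x42, intel_get_microcode_revision() returns 0x42 and a patch with
 * hdr.rev == 0x42 is reported as UCODE_OK without touching MSR 0x79.
 * Only a strictly newer patch triggers the WRMSR, and the revision
 * read back afterwards must match hdr.rev or the load is reported as
 * UCODE_ERROR.
 */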
static enum ucode_state apply_microcode_early(struct ucode_cpu_info *uci)
{
        struct microcode_intel *mc = uci->mc;
        u32 cur_rev;

        return __apply_microcode(uci, mc, &cur_rev);
}

static __init bool load_builtin_intel_microcode(struct cpio_data *cp)
{
        unsigned int eax = 1, ebx, ecx = 0, edx;
        struct firmware fw;
        char name[30];

        if (IS_ENABLED(CONFIG_X86_32))
                return false;

        native_cpuid(&eax, &ebx, &ecx, &edx);

        sprintf(name, "intel-ucode/%02x-%02x-%02x",
                x86_family(eax), x86_model(eax), x86_stepping(eax));

        if (firmware_request_builtin(&fw, name)) {
                cp->size = fw.size;
                cp->data = (void *)fw.data;
                return true;
        }
        return false;
}

static __init struct microcode_intel *get_microcode_blob(struct ucode_cpu_info *uci, bool save)
{
        struct cpio_data cp;

        intel_collect_cpu_info(&uci->cpu_sig);

        if (!load_builtin_intel_microcode(&cp))
                cp = find_microcode_in_initrd(ucode_path);

        if (!(cp.data && cp.size))
                return NULL;

        return scan_microcode(cp.data, cp.size, uci, save);
}

/*
 * Invoked from an early init call to save the microcode blob which was
 * selected during early boot when mm was not usable. The microcode must be
 * saved because initrd is going away. It's an early init call so the APs
 * can just use the pointer and do not have to scan initrd/builtin firmware
 * again.
 */
static int __init save_builtin_microcode(void)
{
        struct ucode_cpu_info uci;

        if (xchg(&ucode_patch_va, NULL) != UCODE_BSP_LOADED)
                return 0;

        if (microcode_loader_disabled() || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
                return 0;

        uci.mc = get_microcode_blob(&uci, true);
        if (uci.mc)
                save_microcode_patch(uci.mc);
        return 0;
}
early_initcall(save_builtin_microcode);

/* Load microcode on BSP from initrd or builtin blobs */
void __init load_ucode_intel_bsp(struct early_load_data *ed)
{
        struct ucode_cpu_info uci;

        uci.mc = get_microcode_blob(&uci, false);
        ed->old_rev = uci.cpu_sig.rev;

        if (uci.mc && apply_microcode_early(&uci) == UCODE_UPDATED) {
                ucode_patch_va = UCODE_BSP_LOADED;
                ed->new_rev = uci.cpu_sig.rev;
        }
}

void load_ucode_intel_ap(void)
{
        struct ucode_cpu_info uci;

        uci.mc = ucode_patch_va;
        if (uci.mc)
                apply_microcode_early(&uci);
}

/* Reload microcode on resume */
void reload_ucode_intel(void)
{
        struct ucode_cpu_info uci = { .mc = ucode_patch_va, };

        if (uci.mc)
                apply_microcode_early(&uci);
}

static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
{
        intel_collect_cpu_info(csig);
        return 0;
}

static enum ucode_state apply_microcode_late(int cpu)
{
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
        struct microcode_intel *mc = ucode_patch_late;
        enum ucode_state ret;
        u32 cur_rev;

        if (WARN_ON_ONCE(smp_processor_id() != cpu))
                return UCODE_ERROR;

        ret = __apply_microcode(uci, mc, &cur_rev);
        if (ret != UCODE_UPDATED && ret != UCODE_OK)
                return ret;

        cpu_data(cpu).microcode = uci->cpu_sig.rev;
        if (!cpu)
                boot_cpu_data.microcode = uci->cpu_sig.rev;

        return ret;
}

static bool ucode_validate_minrev(struct microcode_header_intel *mc_header)
{
        int cur_rev = boot_cpu_data.microcode;

        /*
         * When late-loading, ensure the header declares a minimum revision
         * required to perform a late load. The previously reserved field
         * is 0 in older microcode blobs.
         */
        if (!mc_header->min_req_ver) {
                pr_info("Unsafe microcode update: Microcode header does not specify a required min version\n");
                return false;
        }

        /*
         * Check whether the current revision is greater than or equal to
         * the minimum revision specified in the header.
         */
        if (cur_rev < mc_header->min_req_ver) {
                pr_info("Unsafe microcode update: Current revision 0x%x too old\n", cur_rev);
                pr_info("Current should be at 0x%x or higher. Use early loading instead\n", mc_header->min_req_ver);
                return false;
        }
        return true;
}
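/*
 * Example: a blob whose header carries min_req_ver == 0x500 validates
 * as safe only when the running revision is already 0x500 or newer.
 * In parse_microcode_blobs() below, an unsafe blob (min_req_ver == 0,
 * or the current revision too old) is skipped entirely when
 * force_minrev is set; otherwise it is still accepted but reported as
 * UCODE_NEW instead of UCODE_NEW_SAFE.
 */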
static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter)
{
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
        bool is_safe, new_is_safe = false;
        int cur_rev = uci->cpu_sig.rev;
        unsigned int curr_mc_size = 0;
        u8 *new_mc = NULL, *mc = NULL;

        while (iov_iter_count(iter)) {
                struct microcode_header_intel mc_header;
                unsigned int mc_size, data_size;
                u8 *data;

                if (!copy_from_iter_full(&mc_header, sizeof(mc_header), iter)) {
                        pr_err("error! Truncated or inaccessible header in microcode data file\n");
                        goto fail;
                }

                mc_size = get_totalsize(&mc_header);
                if (mc_size < sizeof(mc_header)) {
                        pr_err("error! Bad data in microcode data file (totalsize too small)\n");
                        goto fail;
                }
                data_size = mc_size - sizeof(mc_header);
                if (data_size > iov_iter_count(iter)) {
                        pr_err("error! Bad data in microcode data file (truncated file?)\n");
                        goto fail;
                }

                /* For performance reasons, reuse mc area when possible */
                if (!mc || mc_size > curr_mc_size) {
                        kvfree(mc);
                        mc = kvmalloc(mc_size, GFP_KERNEL);
                        if (!mc)
                                goto fail;
                        curr_mc_size = mc_size;
                }

                memcpy(mc, &mc_header, sizeof(mc_header));
                data = mc + sizeof(mc_header);
                if (!copy_from_iter_full(data, data_size, iter) ||
                    intel_microcode_sanity_check(mc, true, MC_HEADER_TYPE_MICROCODE) < 0)
                        goto fail;

                if (cur_rev >= mc_header.rev)
                        continue;

                if (!intel_find_matching_signature(mc, &uci->cpu_sig))
                        continue;

                is_safe = ucode_validate_minrev(&mc_header);
                if (force_minrev && !is_safe)
                        continue;

                kvfree(new_mc);
                cur_rev = mc_header.rev;
                new_mc = mc;
                new_is_safe = is_safe;
                mc = NULL;
        }

        if (iov_iter_count(iter))
                goto fail;

        kvfree(mc);
        if (!new_mc)
                return UCODE_NFOUND;

        ucode_patch_late = (struct microcode_intel *)new_mc;
        return new_is_safe ? UCODE_NEW_SAFE : UCODE_NEW;

fail:
        kvfree(mc);
        kvfree(new_mc);
        return UCODE_ERROR;
}

static bool is_blacklisted(unsigned int cpu)
{
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        /*
         * Late loading on model 79 with microcode revision less than 0x0b000021
         * and LLC size per core bigger than 2.5MB may result in a system hang.
         * This behavior is documented in item BDX90, #334165 (Intel Xeon
         * Processor E7-8800/4800 v4 Product Family).
         */
        if (c->x86_vfm == INTEL_BROADWELL_X &&
            c->x86_stepping == 0x01 &&
            llc_size_per_core > 2621440 &&
            c->microcode < 0x0b000021) {
                pr_err_once("Erratum BDX90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
                pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
                return true;
        }

        return false;
}

static enum ucode_state request_microcode_fw(int cpu, struct device *device)
{
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        const struct firmware *firmware;
        struct iov_iter iter;
        enum ucode_state ret;
        struct kvec kvec;
        char name[30];

        if (is_blacklisted(cpu))
                return UCODE_NFOUND;

        sprintf(name, "intel-ucode/%02x-%02x-%02x",
                c->x86, c->x86_model, c->x86_stepping);

        if (request_firmware_direct(&firmware, name, device)) {
                pr_debug("data file %s load failed\n", name);
                return UCODE_NFOUND;
        }

        kvec.iov_base = (void *)firmware->data;
        kvec.iov_len = firmware->size;
        iov_iter_kvec(&iter, ITER_SOURCE, &kvec, 1, firmware->size);
        ret = parse_microcode_blobs(cpu, &iter);

        release_firmware(firmware);

        return ret;
}

static void finalize_late_load(int result)
{
        if (!result)
                update_ucode_pointer(ucode_patch_late);
        else
                kvfree(ucode_patch_late);
        ucode_patch_late = NULL;
}

static struct microcode_ops microcode_intel_ops = {
        .request_microcode_fw = request_microcode_fw,
        .collect_cpu_info     = collect_cpu_info,
        .apply_microcode      = apply_microcode_late,
        .finalize_late_load   = finalize_late_load,
        .stage_microcode      = stage_microcode,
        .use_nmi              = IS_ENABLED(CONFIG_X86_64),
};

static __init void calc_llc_size_per_core(struct cpuinfo_x86 *c)
{
        u64 llc_size = c->x86_cache_size * 1024ULL;

        do_div(llc_size, topology_num_cores_per_package());
        llc_size_per_core = (unsigned int)llc_size;
}
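/*
 * Example: a 24-core Broadwell-X package with a 60 MiB LLC
 * (x86_cache_size == 61440 KB) yields 61440 * 1024 / 24 == 2621440
 * bytes (2.5 MiB) per core; is_blacklisted() above only trips once
 * llc_size_per_core exceeds that value.
 */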
static __init bool staging_available(void)
{
        u64 val;

        val = x86_read_arch_cap_msr();
        if (!(val & ARCH_CAP_MCU_ENUM))
                return false;

        rdmsrq(MSR_IA32_MCU_ENUMERATION, val);
        return !!(val & MCU_STAGING);
}

struct microcode_ops * __init init_intel_microcode(void)
{
        struct cpuinfo_x86 *c = &boot_cpu_data;

        if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
            cpu_has(c, X86_FEATURE_IA64)) {
                pr_err("Intel CPU family 0x%x not supported\n", c->x86);
                return NULL;
        }

        if (staging_available()) {
                microcode_intel_ops.use_staging = true;
                pr_info("Enabled staging feature.\n");
        }

        calc_llc_size_per_core(c);

        return &microcode_intel_ops;
}