/* Path: arch/x86/kernel/cpu/microcode/internal.h */
/* SPDX-License-Identifier: GPL-2.0 */1#ifndef _X86_MICROCODE_INTERNAL_H2#define _X86_MICROCODE_INTERNAL_H34#include <linux/earlycpio.h>5#include <linux/initrd.h>67#include <asm/cpu.h>8#include <asm/microcode.h>910struct device;1112enum ucode_state {13UCODE_OK = 0,14UCODE_NEW,15UCODE_NEW_SAFE,16UCODE_UPDATED,17UCODE_NFOUND,18UCODE_ERROR,19UCODE_TIMEOUT,20UCODE_OFFLINE,21};2223struct microcode_ops {24enum ucode_state (*request_microcode_fw)(int cpu, struct device *dev);25void (*microcode_fini_cpu)(int cpu);2627/*28* The generic 'microcode_core' part guarantees that the callbacks29* below run on a target CPU when they are being called.30* See also the "Synchronization" section in microcode_core.c.31*/32enum ucode_state (*apply_microcode)(int cpu);33int (*collect_cpu_info)(int cpu, struct cpu_signature *csig);34void (*finalize_late_load)(int result);35unsigned int nmi_safe : 1,36use_nmi : 1;37};3839struct early_load_data {40u32 old_rev;41u32 new_rev;42};4344extern struct early_load_data early_data;45extern struct ucode_cpu_info ucode_cpu_info[];46struct cpio_data find_microcode_in_initrd(const char *path);4748#define MAX_UCODE_COUNT 1284950#define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))51#define CPUID_INTEL1 QCHAR('G', 'e', 'n', 'u')52#define CPUID_INTEL2 QCHAR('i', 'n', 'e', 'I')53#define CPUID_INTEL3 QCHAR('n', 't', 'e', 'l')54#define CPUID_AMD1 QCHAR('A', 'u', 't', 'h')55#define CPUID_AMD2 QCHAR('e', 'n', 't', 'i')56#define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D')5758#define CPUID_IS(a, b, c, ebx, ecx, edx) \59(!(((ebx) ^ (a)) | ((edx) ^ (b)) | ((ecx) ^ (c))))6061/*62* In early loading microcode phase on BSP, boot_cpu_data is not set up yet.63* x86_cpuid_vendor() gets vendor id for BSP.64*65* In 32 bit AP case, accessing boot_cpu_data needs linear address. 
To simplify66* coding, we still use x86_cpuid_vendor() to get vendor id for AP.67*68* x86_cpuid_vendor() gets vendor information directly from CPUID.69*/70static inline int x86_cpuid_vendor(void)71{72u32 eax = 0x00000000;73u32 ebx, ecx = 0, edx;7475native_cpuid(&eax, &ebx, &ecx, &edx);7677if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))78return X86_VENDOR_INTEL;7980if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx))81return X86_VENDOR_AMD;8283return X86_VENDOR_UNKNOWN;84}8586static inline unsigned int x86_cpuid_family(void)87{88u32 eax = 0x00000001;89u32 ebx, ecx = 0, edx;9091native_cpuid(&eax, &ebx, &ecx, &edx);9293return x86_family(eax);94}9596extern bool force_minrev;9798#ifdef CONFIG_CPU_SUP_AMD99void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family);100void load_ucode_amd_ap(unsigned int family);101void reload_ucode_amd(unsigned int cpu);102struct microcode_ops *init_amd_microcode(void);103void exit_amd_microcode(void);104#else /* CONFIG_CPU_SUP_AMD */105static inline void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family) { }106static inline void load_ucode_amd_ap(unsigned int family) { }107static inline void reload_ucode_amd(unsigned int cpu) { }108static inline struct microcode_ops *init_amd_microcode(void) { return NULL; }109static inline void exit_amd_microcode(void) { }110#endif /* !CONFIG_CPU_SUP_AMD */111112#ifdef CONFIG_CPU_SUP_INTEL113void load_ucode_intel_bsp(struct early_load_data *ed);114void load_ucode_intel_ap(void);115void reload_ucode_intel(void);116struct microcode_ops *init_intel_microcode(void);117#else /* CONFIG_CPU_SUP_INTEL */118static inline void load_ucode_intel_bsp(struct early_load_data *ed) { }119static inline void load_ucode_intel_ap(void) { }120static inline void reload_ucode_intel(void) { }121static inline struct microcode_ops *init_intel_microcode(void) { return NULL; }122#endif /* !CONFIG_CPU_SUP_INTEL */123124#endif /* _X86_MICROCODE_INTERNAL_H */125126127