// SPDX-License-Identifier: GPL-2.0
/*
 * AMD Encrypted Register State Support
 *
 * Author: Joerg Roedel <[email protected]>
 */

/*
 * misc.h needs to be first because it knows how to include the other kernel
 * headers in the pre-decompression code in a way that does not break
 * compilation.
 */
#include "misc.h"

#include <asm/bootparam.h>
#include <asm/pgtable_types.h>
#include <asm/shared/msr.h>
#include <asm/sev.h>
#include <asm/trapnr.h>
#include <asm/trap_pf.h>
#include <asm/msr-index.h>
#include <asm/fpu/xcr.h>
#include <asm/ptrace.h>
#include <asm/svm.h>
#include <asm/cpuid/api.h>

#include "error.h"
#include "sev.h"

static struct ghcb boot_ghcb_page __aligned(PAGE_SIZE);
struct ghcb *boot_ghcb;

#undef __init
#define __init

#define __BOOT_COMPRESSED

u8 snp_vmpl;
u16 ghcb_version;

u64 boot_svsm_caa_pa;

/* Include code for early handlers */
#include "../../boot/startup/sev-shared.c"

static bool sev_snp_enabled(void)
{
	return sev_status & MSR_AMD64_SEV_SNP_ENABLED;
}

void snp_set_page_private(unsigned long paddr)
{
	struct psc_desc d = {
		SNP_PAGE_STATE_PRIVATE,
		(struct svsm_ca *)boot_svsm_caa_pa,
		boot_svsm_caa_pa
	};

	if (!sev_snp_enabled())
		return;

	__page_state_change(paddr, paddr, &d);
}

void snp_set_page_shared(unsigned long paddr)
{
	struct psc_desc d = {
		SNP_PAGE_STATE_SHARED,
		(struct svsm_ca *)boot_svsm_caa_pa,
		boot_svsm_caa_pa
	};

	if (!sev_snp_enabled())
		return;

	__page_state_change(paddr, paddr, &d);
}

bool early_setup_ghcb(void)
{
	if (set_page_decrypted((unsigned long)&boot_ghcb_page))
		return false;

	/* Page is now mapped decrypted, clear it */
	memset(&boot_ghcb_page, 0, sizeof(boot_ghcb_page));

	boot_ghcb = &boot_ghcb_page;

	/* Initialize lookup tables for the instruction decoder */
	sev_insn_decode_init();

	/* SNP guests require that the GHCB GPA be registered */
	if (sev_snp_enabled())
		snp_register_ghcb_early(__pa(&boot_ghcb_page));

	return true;
}

void snp_accept_memory(phys_addr_t start, phys_addr_t end)
{
	struct psc_desc d = {
		SNP_PAGE_STATE_PRIVATE,
		(struct svsm_ca *)boot_svsm_caa_pa,
		boot_svsm_caa_pa
	};

	for (phys_addr_t pa = start; pa < end; pa += PAGE_SIZE)
		__page_state_change(pa, pa, &d);
}
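/*
 * Illustrative sketch only, not part of the upstream file: how a caller in
 * this boot stage would typically flip one page to shared for hypervisor
 * communication and then reclaim it. The function name and flow are
 * hypothetical; the page must be page-aligned and identity-mapped, and the
 * page table encryption attribute is handled separately via
 * set_page_decrypted()/set_page_encrypted().
 */
static void __maybe_unused example_psc_roundtrip(unsigned long paddr)
{
	snp_set_page_shared(paddr);	/* make the page accessible to the HV */
	/* ... communicate with the hypervisor via the now-shared page ... */
	snp_set_page_private(paddr);	/* convert it back to private memory */
}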
void sev_es_shutdown_ghcb(void)
{
	if (!boot_ghcb)
		return;

	if (!sev_es_check_cpu_features())
		error("SEV-ES CPU Features missing.");

	/*
	 * boot_ghcb denotes whether to use the GHCB MSR protocol or the GHCB
	 * shared page to perform a GHCB request. Since the GHCB page is being
	 * changed to encrypted, it can't be used to perform GHCB requests.
	 * Clear the boot_ghcb variable so that the GHCB MSR protocol is used
	 * to change the GHCB page over to an encrypted page.
	 */
	boot_ghcb = NULL;

	/*
	 * The GHCB page must be flushed from the cache and mapped encrypted
	 * again. Otherwise the running kernel will see strange cache effects
	 * when trying to use that page.
	 */
	if (set_page_encrypted((unsigned long)&boot_ghcb_page))
		error("Can't map GHCB page encrypted");

	/*
	 * The GHCB page is mapped encrypted again and flushed from the cache.
	 * Mark it non-present now to catch bugs when #VC exceptions trigger
	 * after this point.
	 */
	if (set_page_non_present((unsigned long)&boot_ghcb_page))
		error("Can't unmap GHCB page");
}

static void __noreturn sev_es_ghcb_terminate(struct ghcb *ghcb, unsigned int set,
					     unsigned int reason, u64 exit_info_2)
{
	u64 exit_info_1 = SVM_VMGEXIT_TERM_REASON(set, reason);

	vc_ghcb_invalidate(ghcb);
	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_TERM_REQUEST);
	ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
	ghcb_set_sw_exit_info_2(ghcb, exit_info_2);

	sev_es_wr_ghcb_msr(__pa(ghcb));
	VMGEXIT();

	while (true)
		asm volatile("hlt\n" : : : "memory");
}

bool sev_es_check_ghcb_fault(unsigned long address)
{
	/* Check whether the fault was on the GHCB page */
	return ((address & PAGE_MASK) == (unsigned long)&boot_ghcb_page);
}

/*
 * SNP_FEATURES_IMPL_REQ is the mask of SNP features that need a guest-side
 * implementation for proper functioning of the guest. If any of these
 * features are enabled in the hypervisor but are lacking a guest-side
 * implementation, the behavior of the guest is undefined. The guest could
 * fail in a non-obvious way, making it difficult to debug.
 *
 * Since the behavior of reserved feature bits is unknown, add them to the
 * required features mask to be on the safe side.
 */
#define SNP_FEATURES_IMPL_REQ	(MSR_AMD64_SNP_VTOM |			\
				 MSR_AMD64_SNP_REFLECT_VC |		\
				 MSR_AMD64_SNP_RESTRICTED_INJ |		\
				 MSR_AMD64_SNP_ALT_INJ |		\
				 MSR_AMD64_SNP_DEBUG_SWAP |		\
				 MSR_AMD64_SNP_VMPL_SSS |		\
				 MSR_AMD64_SNP_SECURE_TSC |		\
				 MSR_AMD64_SNP_VMGEXIT_PARAM |		\
				 MSR_AMD64_SNP_VMSA_REG_PROT |		\
				 MSR_AMD64_SNP_RESERVED_BIT13 |		\
				 MSR_AMD64_SNP_RESERVED_BIT15 |		\
				 MSR_AMD64_SNP_SECURE_AVIC |		\
				 MSR_AMD64_SNP_RESERVED_MASK)

#ifdef CONFIG_AMD_SECURE_AVIC
#define SNP_FEATURE_SECURE_AVIC		MSR_AMD64_SNP_SECURE_AVIC
#else
#define SNP_FEATURE_SECURE_AVIC		0
#endif

/*
 * SNP_FEATURES_PRESENT is the mask of SNP features that are implemented
 * by the guest kernel. As and when a new feature is implemented in the
 * guest kernel, a corresponding bit should be added to the mask.
 */
#define SNP_FEATURES_PRESENT	(MSR_AMD64_SNP_DEBUG_SWAP |		\
				 MSR_AMD64_SNP_SECURE_TSC |		\
				 SNP_FEATURE_SECURE_AVIC)

u64 snp_get_unsupported_features(u64 status)
{
	if (!(status & MSR_AMD64_SEV_SNP_ENABLED))
		return 0;

	return status & SNP_FEATURES_IMPL_REQ & ~SNP_FEATURES_PRESENT;
}
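/*
 * Worked example with hypothetical sev_status values: if the hypervisor
 * sets MSR_AMD64_SEV_SNP_ENABLED | MSR_AMD64_SNP_DEBUG_SWAP, the result
 * above is 0 because DEBUG_SWAP is covered by SNP_FEATURES_PRESENT. If it
 * instead sets MSR_AMD64_SEV_SNP_ENABLED | MSR_AMD64_SNP_RESTRICTED_INJ,
 * the result is MSR_AMD64_SNP_RESTRICTED_INJ and snp_check_features()
 * below will terminate the boot with that mask as the reason.
 */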
void snp_check_features(void)
{
	u64 unsupported;

	/*
	 * Terminate the boot if the hypervisor has enabled any feature that
	 * lacks a guest-side implementation. Pass the unsupported features
	 * mask through EXIT_INFO_2 of the GHCB protocol so that those
	 * features can be reported as part of the guest boot failure.
	 */
	unsupported = snp_get_unsupported_features(sev_status);
	if (unsupported) {
		if (ghcb_version < 2 || (!boot_ghcb && !early_setup_ghcb()))
			sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);

		sev_es_ghcb_terminate(boot_ghcb, SEV_TERM_SET_GEN,
				      GHCB_SNP_UNSUPPORTED, unsupported);
	}
}

/* Search for the Confidential Computing blob in the EFI config table. */
static struct cc_blob_sev_info *find_cc_blob_efi(struct boot_params *bp)
{
	unsigned long cfg_table_pa;
	unsigned int cfg_table_len;
	int ret;

	ret = efi_get_conf_table(bp, &cfg_table_pa, &cfg_table_len);
	if (ret)
		return NULL;

	return (struct cc_blob_sev_info *)efi_find_vendor_table(bp, cfg_table_pa,
								cfg_table_len,
								EFI_CC_BLOB_GUID);
}

/*
 * Initial setup of SNP relies on information provided by the
 * Confidential Computing blob, which can be passed to the boot kernel
 * by firmware/bootloader in the following ways:
 *
 * - via an entry in the EFI config table
 * - via a setup_data structure, as defined by the Linux Boot Protocol
 *
 * Scan for the blob in that order.
 */
static struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
{
	struct cc_blob_sev_info *cc_info;

	cc_info = find_cc_blob_efi(bp);
	if (cc_info)
		goto found_cc_info;

	cc_info = find_cc_blob_setup_data(bp);
	if (!cc_info)
		return NULL;

found_cc_info:
	if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC)
		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);

	return cc_info;
}
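/*
 * For reference on the header check above: CC_BLOB_SEV_HDR_MAGIC is
 * 0x45444d41 ("AMDE" when read as little-endian ASCII), and ->magic is
 * the first field of struct cc_blob_sev_info. A blob that is found but
 * fails this check terminates the boot rather than returning NULL,
 * presumably because a malformed blob indicates broken firmware rather
 * than a non-SNP environment.
 */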
/*
 * Indicate SNP based on the presence of the SNP-specific CC blob.
 * Subsequent checks will verify the SNP CPUID/MSR bits.
 */
static bool early_snp_init(struct boot_params *bp)
{
	struct cc_blob_sev_info *cc_info;

	if (!bp)
		return false;

	cc_info = find_cc_blob(bp);
	if (!cc_info)
		return false;

	/*
	 * If an SNP-specific Confidential Computing blob is present, then
	 * firmware/bootloader have indicated SNP support. Verifying this
	 * involves CPUID checks which will be more reliable if the SNP
	 * CPUID table is used. See the comments over setup_cpuid_table()
	 * for more details.
	 */
	setup_cpuid_table(cc_info);

	/*
	 * Record the SVSM Calling Area (CA) address if the guest is not
	 * running at VMPL0. The CA will be used to communicate with the
	 * SVSM and request its services.
	 */
	svsm_setup_ca(cc_info, rip_rel_ptr(&boot_ghcb_page));

	/*
	 * Pass the run-time kernel a pointer to the CC info via boot_params
	 * so the EFI config table doesn't need to be searched again during
	 * the early startup phase.
	 */
	bp->cc_blob_address = (u32)(unsigned long)cc_info;

	return true;
}

/*
 * sev_check_cpu_support - Check for SEV support in the CPU capabilities
 *
 * Returns < 0 if SEV is not supported, otherwise the position of the
 * encryption bit in the page table descriptors.
 */
static int sev_check_cpu_support(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Check for the SME/SEV support leaf */
	eax = 0x80000000;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	if (eax < 0x8000001f)
		return -ENODEV;

	/*
	 * Check for the SME/SEV feature:
	 *   CPUID Fn8000_001F[EAX]
	 *   - Bit 0 - Secure Memory Encryption support
	 *   - Bit 1 - Secure Encrypted Virtualization support
	 *   CPUID Fn8000_001F[EBX]
	 *   - Bits 5:0 - Pagetable bit position used to indicate encryption
	 */
	eax = 0x8000001f;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	/* Check whether SEV is supported */
	if (!(eax & BIT(1)))
		return -ENODEV;

	sev_snp_needs_sfw = !(ebx & BIT(31));

	return ebx & 0x3f;
}
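/*
 * Worked example with hypothetical register values for the decoding
 * above: if CPUID Fn8000_001F returns EAX = 0x0000000b (SME, SEV and
 * SEV-ES set) and EBX = 0x0000402f, then sev_check_cpu_support() returns
 * 0x2f = 47, i.e. the encryption bit occupies page table bit 47, and
 * sev_snp_needs_sfw is set to true because EBX[31] is clear.
 */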
void sev_enable(struct boot_params *bp)
{
	struct msr m;
	int bitpos;
	bool snp;

	/*
	 * bp->cc_blob_address should only be set by the boot/compressed
	 * kernel. Initialize it to 0 to ensure that uninitialized values
	 * from buggy bootloaders aren't propagated.
	 */
	if (bp)
		bp->cc_blob_address = 0;

	/*
	 * Do an initial SEV capability check before early_snp_init(), which
	 * loads the CPUID page. The same checks are then repeated with
	 * CPUID state that no longer depends on the hypervisor, so those
	 * results are trustworthy.
	 *
	 * If the HV fakes SEV support, the guest will crash'n'burn
	 * which is good enough.
	 */
	if (sev_check_cpu_support() < 0)
		return;

	/*
	 * Set up and do preliminary detection of SNP. This will be
	 * sanity-checked against CPUID/MSR values later.
	 */
	snp = early_snp_init(bp);

	/* Now repeat the checks with the SNP CPUID table. */
	bitpos = sev_check_cpu_support();
	if (bitpos < 0) {
		if (snp)
			error("SEV-SNP support indicated by CC blob, but not CPUID.");
		return;
	}

	/* Set the SME mask if this is an SEV guest. */
	raw_rdmsr(MSR_AMD64_SEV, &m);
	sev_status = m.q;
	if (!(sev_status & MSR_AMD64_SEV_ENABLED))
		return;

	/* Negotiate the GHCB protocol version. */
	if (sev_status & MSR_AMD64_SEV_ES_ENABLED) {
		if (!sev_es_negotiate_protocol())
			sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_PROT_UNSUPPORTED);
	}

	/*
	 * SNP is supported in v2 of the GHCB spec, which mandates support
	 * for HV features.
	 */
	if (sev_status & MSR_AMD64_SEV_SNP_ENABLED) {
		u64 hv_features;

		hv_features = get_hv_features();
		if (!(hv_features & GHCB_HV_FT_SNP))
			sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);

		/*
		 * Running at VMPL0 is required unless an SVSM is present and
		 * the hypervisor supports the required SVSM GHCB events.
		 */
		if (snp_vmpl && !(hv_features & GHCB_HV_FT_SNP_MULTI_VMPL))
			sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_NOT_VMPL0);
	}

	if (snp && !(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
		error("SEV-SNP support indicated by CC blob, but not SEV status MSR.");

	sme_me_mask = BIT_ULL(bitpos);
}

/*
 * sev_get_status - Retrieve the SEV status mask
 *
 * Returns 0 if the CPU is not SEV capable, otherwise the value of the
 * AMD64_SEV MSR.
 */
u64 sev_get_status(void)
{
	struct msr m;

	if (sev_check_cpu_support() < 0)
		return 0;

	raw_rdmsr(MSR_AMD64_SEV, &m);
	return m.q;
}

void sev_prep_identity_maps(unsigned long top_level_pgt)
{
	/*
	 * The Confidential Computing blob is used very early in the
	 * uncompressed kernel to find the in-memory CPUID table used to
	 * handle CPUID instructions. Make sure an identity mapping exists
	 * so it can be accessed after the switchover.
	 */
	if (sev_snp_enabled()) {
		unsigned long cc_info_pa = boot_params_ptr->cc_blob_address;
		struct cc_blob_sev_info *cc_info;

		kernel_add_identity_map(cc_info_pa, cc_info_pa + sizeof(*cc_info));

		cc_info = (struct cc_blob_sev_info *)cc_info_pa;
		kernel_add_identity_map(cc_info->cpuid_phys, cc_info->cpuid_phys + cc_info->cpuid_len);
	}

	sev_verify_cbit(top_level_pgt);
}

bool early_is_sevsnp_guest(void)
{
	static bool sevsnp;

	if (sevsnp)
		return true;

	if (!(sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED))
		return false;

	sevsnp = true;

	if (!snp_vmpl) {
		unsigned int eax, ebx, ecx, edx;

		/* CPUID Fn8000_001F_EAX[28] - SVSM support */
		eax = 0x8000001f;
		ecx = 0;
		native_cpuid(&eax, &ebx, &ecx, &edx);
		if (eax & BIT(28)) {
			struct msr m;

			/* Obtain the address of the calling area to use */
			raw_rdmsr(MSR_SVSM_CAA, &m);
			boot_svsm_caa_pa = m.q;

			/*
			 * The real VMPL level cannot be discovered, but the
			 * memory acceptance routines make no use of it, so
			 * any non-zero value suffices here.
			 */
			snp_vmpl = U8_MAX;
		}
	}

	return true;
}
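/*
 * Usage sketch (hypothetical caller, not part of this file): decompressor
 * code that must behave differently under SEV-SNP can simply gate on the
 * cached result, e.g.:
 *
 *	if (early_is_sevsnp_guest())
 *		snp_accept_memory(start, end);
 *
 * Repeated calls are cheap because the first successful check latches the
 * static 'sevsnp' flag above.
 */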