#define pr_fmt(fmt) "SEV: " fmt
#include <linux/percpu-defs.h>
#include <linux/cc_platform.h>
#include <linux/printk.h>
#include <linux/mm_types.h>
#include <linux/set_memory.h>
#include <linux/memblock.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/efi.h>
#include <linux/io.h>
#include <linux/psp-sev.h>
#include <uapi/linux/sev-guest.h>
#include <asm/init.h>
#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
#include <asm/sev.h>
#include <asm/sev-internal.h>
#include <asm/insn-eval.h>
#include <asm/fpu/xcr.h>
#include <asm/processor.h>
#include <asm/realmode.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/svm.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/cpuid/api.h>
#include <asm/cmdline.h>
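
/* For early boot hypervisor communication in SEV-ES enabled guests */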
struct ghcb boot_ghcb_page __bss_decrypted __aligned(PAGE_SIZE);
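
/*
 * Needs to be in the .data section because we need it NULL before bss is
 * cleared
 */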
struct ghcb *boot_ghcb __section(".data");
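
/* Bitmap of SEV features supported by the hypervisor */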
u64 sev_hv_features __ro_after_init;
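
/* Physical address of the SNP secrets page, taken from the CC blob */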
u64 sev_secrets_pa __ro_after_init;
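
/* Calling Area used for early boot SVSM communication */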
struct svsm_ca boot_svsm_ca_page __aligned(PAGE_SIZE);
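
/* Per-CPU SVSM Calling Area pointer and its physical address */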
DEFINE_PER_CPU(struct svsm_ca *, svsm_caa);
DEFINE_PER_CPU(u64, svsm_caa_pa);
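
/*
 * Nothing shall interrupt this code path while holding the per-CPU
 * GHCB. The backup GHCB is only for NMIs interrupting this path.
 *
 * Callers must disable local interrupts around it.
 */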
noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
{
struct sev_es_runtime_data *data;
struct ghcb *ghcb;
	WARN_ON(!irqs_disabled());

	data = this_cpu_read(runtime_data);
	ghcb = &data->ghcb_page;

	if (unlikely(data->ghcb_active)) {
		/* GHCB is already in use - save its contents */
		if (unlikely(data->backup_ghcb_active)) {
			/*
			 * The backup GHCB is also in use. There is no way to
			 * continue, so kill the machine; mark the GHCBs
			 * inactive first so that panic() can still print.
			 */
			data->ghcb_active = false;
			data->backup_ghcb_active = false;

			instrumentation_begin();
			panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
			instrumentation_end();
		}

		/* Mark the backup GHCB active before writing to it */
		data->backup_ghcb_active = true;
		state->ghcb = &data->backup_ghcb;
		/* Back up the GHCB content */
		*state->ghcb = *ghcb;
} else {
state->ghcb = NULL;
data->ghcb_active = true;
}
return ghcb;
}
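
/* Include code shared with pre-decompression boot stage */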
#include "sev-shared.c"
noinstr void __sev_put_ghcb(struct ghcb_state *state)
{
struct sev_es_runtime_data *data;
struct ghcb *ghcb;
	WARN_ON(!irqs_disabled());

	data = this_cpu_read(runtime_data);
	ghcb = &data->ghcb_page;

	if (state->ghcb) {
		/* Restore GHCB from backup */
		*ghcb = *state->ghcb;
		data->backup_ghcb_active = false;
		state->ghcb = NULL;
	} else {
		/*
		 * Invalidate the GHCB so a VMGEXIT instruction issued
		 * from userspace won't appear to be valid.
		 */
		vc_ghcb_invalidate(ghcb);
		data->ghcb_active = false;
	}
}
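
/*
 * Perform the SVSM protocol call described by @call. Once the per-CPU GHCBs
 * are initialized the GHCB protocol is used; before that the boot GHCB or,
 * failing that, the GHCB MSR protocol is used. The call is retried while it
 * returns -EAGAIN. A typical call sequence (sketch):
 *
 *	struct svsm_call call = {};
 *
 *	call.caa = svsm_get_caa();
 *	call.rax = SVSM_CORE_CALL(SVSM_CORE_REMAP_CA);
 *	call.rcx = pa;
 *	if (svsm_perform_call_protocol(&call))
 *		... handle failure ...
 */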
int svsm_perform_call_protocol(struct svsm_call *call)
{
struct ghcb_state state;
unsigned long flags;
struct ghcb *ghcb;
int ret;

	/*
	 * This can be called very early in boot, so use native functions in
	 * order to avoid paravirt issues.
	 */
	flags = native_local_irq_save();

	if (sev_cfg.ghcbs_initialized)
		ghcb = __sev_get_ghcb(&state);
	else if (boot_ghcb)
		ghcb = boot_ghcb;
	else
		ghcb = NULL;
do {
ret = ghcb ? svsm_perform_ghcb_protocol(ghcb, call)
: svsm_perform_msr_protocol(call);
} while (ret == -EAGAIN);

	if (sev_cfg.ghcbs_initialized)
		__sev_put_ghcb(&state);

	native_local_irq_restore(flags);

	return ret;
}
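
/*
 * Transition a range of pages between the private and shared states using
 * one GHCB MSR protocol request per 4K page. Any failure is fatal and
 * terminates the guest.
 */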
void __head
early_set_pages_state(unsigned long vaddr, unsigned long paddr,
unsigned long npages, enum psc_op op)
{
unsigned long paddr_end;
u64 val;

	vaddr = vaddr & PAGE_MASK;

	paddr = paddr & PAGE_MASK;
	paddr_end = paddr + (npages << PAGE_SHIFT);

	while (paddr < paddr_end) {
		/* Page validation must be rescinded before changing to shared */
		if (op == SNP_PAGE_STATE_SHARED)
			pvalidate_4k_page(vaddr, paddr, false);

		/*
		 * Use the MSR protocol because this function can be called
		 * before a GHCB is established.
		 */
		sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
		VMGEXIT();

		val = sev_es_rd_ghcb_msr();

		if (GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP)
			goto e_term;

		if (GHCB_MSR_PSC_RESP_VAL(val))
			goto e_term;

		/* Page validation must be performed after changing to private */
		if (op == SNP_PAGE_STATE_PRIVATE)
			pvalidate_4k_page(vaddr, paddr, true);

		vaddr += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}

	return;

e_term:
	sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
}
void __head early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
unsigned long npages)
{
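	/*
	 * This can be invoked very early in boot while running identity
	 * mapped, so use an open-coded sev_status check instead of
	 * cc_platform_has().
	 */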
if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
return;
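
	/* Ask the hypervisor to mark the pages private in the RMP table. */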
early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_PRIVATE);
}
void __head early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
unsigned long npages)
{
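	/*
	 * Open-coded SNP check, for the same early-boot reasons as in
	 * early_snp_set_memory_private() above.
	 */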
if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
return;
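
	/* Ask the hypervisor to mark the pages shared in the RMP table. */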
early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_SHARED);
}
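
/*
 * Initial setup of SNP relies on information provided by the Confidential
 * Computing blob, which can be passed to the kernel in the following ways,
 * depending on how it is booted:
 *
 * - when booted via the boot/decompression kernel:
 *   - via boot_params
 *
 * - when booted directly by firmware/bootloader (e.g. CONFIG_PVH):
 *   - via a setup_data entry, as defined by the Linux Boot Protocol
 *
 * Scan for the blob in that order.
 */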
static __head struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
{
struct cc_blob_sev_info *cc_info;

	/* The boot kernel passes the CC blob via boot_params. */
	if (bp->cc_blob_address) {
		cc_info = (struct cc_blob_sev_info *)(unsigned long)bp->cc_blob_address;
		goto found_cc_info;
	}

	/*
	 * If the kernel was booted directly, without the boot/decompression
	 * kernel, the CC blob may have been passed via setup_data instead.
	 */
	cc_info = find_cc_blob_setup_data(bp);
	if (!cc_info)
		return NULL;

found_cc_info:
	if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC)
		snp_abort();

	return cc_info;
}
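
/*
 * When running under an SVSM, relocate the SVSM Calling Area to the
 * kernel-provided boot_svsm_ca_page so it remains usable by the kernel.
 */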
static __head void svsm_setup(struct cc_blob_sev_info *cc_info)
{
struct svsm_call call = {};
int ret;
u64 pa;

	/* An SVSM Calling Area is only set up when not running at VMPL0 */
	if (!svsm_setup_ca(cc_info))
		return;

	/* Running identity mapped this early, so take the CA address RIP-relative */
	pa = (u64)rip_rel_ptr(&boot_svsm_ca_page);

	/*
	 * Switch to the boot SVSM CA while the current CA is still addressable.
	 * There is no GHCB at this point, so the MSR protocol is used.
	 */
	call.caa = svsm_get_caa();
	call.rax = SVSM_CORE_CALL(SVSM_CORE_REMAP_CA);
	call.rcx = pa;
	ret = svsm_perform_call_protocol(&call);
	if (ret)
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SVSM_CA_REMAP_FAIL);

	boot_svsm_caa = (struct svsm_ca *)pa;
	boot_svsm_caa_pa = pa;
}
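
/*
 * Probe for the SNP CC blob, record the secrets page address, set up the
 * early CPUID table and any SVSM presence, and cache the blob address in
 * boot_params for later consumers.
 */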
bool __head snp_init(struct boot_params *bp)
{
struct cc_blob_sev_info *cc_info;
	if (!bp)
		return false;

	cc_info = find_cc_blob(bp);
	if (!cc_info)
		return false;

	if (cc_info->secrets_phys && cc_info->secrets_len == PAGE_SIZE)
		sev_secrets_pa = cc_info->secrets_phys;
	else
		return false;

	setup_cpuid_table(cc_info);

	svsm_setup(cc_info);

	/* The CC blob will be used later to access the secrets page. */
	bp->cc_blob_address = (u32)(unsigned long)cc_info;

	return true;
}
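
/* Terminate the guest: SNP is required but cannot be supported. */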
void __head __noreturn snp_abort(void)
{
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
}