// SPDX-License-Identifier: GPL-2.0-only1/*2* AMD Memory Encryption Support3*4* Copyright (C) 2019 SUSE5*6* Author: Joerg Roedel <[email protected]>7*/89#define pr_fmt(fmt) "SEV: " fmt1011#include <linux/bug.h>12#include <linux/kernel.h>1314#include <asm/cpu_entry_area.h>15#include <asm/msr.h>16#include <asm/ptrace.h>17#include <asm/sev.h>1819#include "internal.h"2021static __always_inline bool on_vc_stack(struct pt_regs *regs)22{23unsigned long sp = regs->sp;2425/* User-mode RSP is not trusted */26if (user_mode(regs))27return false;2829/* SYSCALL gap still has user-mode RSP */30if (ip_within_syscall_gap(regs))31return false;3233return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC)));34}3536/*37* This function handles the case when an NMI is raised in the #VC38* exception handler entry code, before the #VC handler has switched off39* its IST stack. In this case, the IST entry for #VC must be adjusted,40* so that any nested #VC exception will not overwrite the stack41* contents of the interrupted #VC handler.42*43* The IST entry is adjusted unconditionally so that it can be also be44* unconditionally adjusted back in __sev_es_ist_exit(). 
 * Otherwise a
 * nested sev_es_ist_exit() call may adjust back the IST entry too
 * early.
 *
 * The __sev_es_ist_enter() and __sev_es_ist_exit() functions always run
 * on the NMI IST stack, as they are only called from NMI handling code
 * right now.
 */
void noinstr __sev_es_ist_enter(struct pt_regs *regs)
{
	unsigned long old_ist, new_ist;

	/* Read old IST entry */
	new_ist = old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);

	/*
	 * If NMI happened while on the #VC IST stack, set the new IST
	 * value below regs->sp, so that the interrupted stack frame is
	 * not overwritten by subsequent #VC exceptions.
	 */
	if (on_vc_stack(regs))
		new_ist = regs->sp;

	/*
	 * Reserve additional 8 bytes and store old IST value so this
	 * adjustment can be unrolled in __sev_es_ist_exit().
	 */
	new_ist -= sizeof(old_ist);
	*(unsigned long *)new_ist = old_ist;

	/* Set new IST entry */
	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
}

/*
 * Undo the IST adjustment made by __sev_es_ist_enter(): pop the saved
 * old IST value off the (adjusted) #VC stack and write it back to the
 * TSS. Must be called exactly once per __sev_es_ist_enter() call.
 */
void noinstr __sev_es_ist_exit(void)
{
	unsigned long ist;

	/* Read IST entry */
	ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);

	/*
	 * An IST entry still pointing at the stack top means no
	 * adjustment was made - __sev_es_ist_enter() always moves the
	 * entry down by at least sizeof(unsigned long). Warn and bail
	 * instead of dereferencing an unadjusted entry.
	 */
	if (WARN_ON(ist == __this_cpu_ist_top_va(VC)))
		return;

	/* Read back old IST entry and write it to the TSS */
	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
}

/*
 * Tell the hypervisor that NMI handling is finished: issue an
 * SVM_VMGEXIT_NMI_COMPLETE exit through the per-CPU GHCB, with both
 * sw_exit_info fields cleared.
 */
void noinstr __sev_es_nmi_complete(void)
{
	struct ghcb_state state;
	struct ghcb *ghcb;

	ghcb = __sev_get_ghcb(&state);

	/* Start from a clean GHCB, then fill in only the exit code/info */
	vc_ghcb_invalidate(ghcb);
	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
	ghcb_set_sw_exit_info_1(ghcb, 0);
	ghcb_set_sw_exit_info_2(ghcb, 0);

	/* Point the GHCB MSR at this GHCB's physical address and exit to the HV */
	sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
	VMGEXIT();

	__sev_put_ghcb(&state);
}

/*
 * Nothing shall interrupt this code path while holding the per-CPU
 * GHCB.
 * The backup GHCB is only for NMIs interrupting this path.
 *
 * Callers must disable local interrupts around it.
 *
 * Acquire this CPU's GHCB for use. If the primary GHCB page is already
 * active (we interrupted a user of it, e.g. from an NMI), its contents
 * are saved into the backup GHCB and state->ghcb records where to
 * restore them from; otherwise state->ghcb is NULL and the primary is
 * simply marked active. Release with __sev_put_ghcb(&state).
 */
noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
{
	struct sev_es_runtime_data *data;
	struct ghcb *ghcb;

	WARN_ON(!irqs_disabled());

	data = this_cpu_read(runtime_data);
	ghcb = &data->ghcb_page;

	if (unlikely(data->ghcb_active)) {
		/* GHCB is already in use - save its contents */

		if (unlikely(data->backup_ghcb_active)) {
			/*
			 * Backup-GHCB is also already in use. There is no way
			 * to continue here so just kill the machine. To make
			 * panic() work, mark GHCBs inactive so that messages
			 * can be printed out.
			 */
			data->ghcb_active = false;
			data->backup_ghcb_active = false;

			/* panic() is instrumentable; noinstr rules allow it between these markers */
			instrumentation_begin();
			panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
			instrumentation_end();
		}

		/* Mark backup_ghcb active before writing to it */
		data->backup_ghcb_active = true;

		state->ghcb = &data->backup_ghcb;

		/* Backup GHCB content */
		*state->ghcb = *ghcb;
	} else {
		state->ghcb = NULL;
		data->ghcb_active = true;
	}

	return ghcb;
}

/*
 * Release the per-CPU GHCB acquired by __sev_get_ghcb(). If state->ghcb
 * is set, a nested user had saved the primary GHCB into the backup -
 * restore it and clear the backup's active flag. Otherwise invalidate
 * the primary GHCB (so a stray VMGEXIT from userspace won't look valid)
 * and mark it free.
 */
noinstr void __sev_put_ghcb(struct ghcb_state *state)
{
	struct sev_es_runtime_data *data;
	struct ghcb *ghcb;

	WARN_ON(!irqs_disabled());

	data = this_cpu_read(runtime_data);
	ghcb = &data->ghcb_page;

	if (state->ghcb) {
		/* Restore GHCB from Backup */
		*ghcb = *state->ghcb;
		data->backup_ghcb_active = false;
		state->ghcb = NULL;
	} else {
		/*
		 * Invalidate the GHCB so a VMGEXIT instruction issued
		 * from userspace won't appear to be valid.
		 */
		vc_ghcb_invalidate(ghcb);
		data->ghcb_active = false;
	}
}