// SPDX-License-Identifier: GPL-2.0-only1/*2* AMD Memory Encryption Support3*4* Copyright (C) 2019 SUSE5*6* Author: Joerg Roedel <[email protected]>7*/89#define pr_fmt(fmt) "SEV: " fmt1011#include <linux/bug.h>12#include <linux/kernel.h>1314#include <asm/cpu_entry_area.h>15#include <asm/msr.h>16#include <asm/ptrace.h>17#include <asm/sev.h>18#include <asm/sev-internal.h>1920static __always_inline bool on_vc_stack(struct pt_regs *regs)21{22unsigned long sp = regs->sp;2324/* User-mode RSP is not trusted */25if (user_mode(regs))26return false;2728/* SYSCALL gap still has user-mode RSP */29if (ip_within_syscall_gap(regs))30return false;3132return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC)));33}3435/*36* This function handles the case when an NMI is raised in the #VC37* exception handler entry code, before the #VC handler has switched off38* its IST stack. In this case, the IST entry for #VC must be adjusted,39* so that any nested #VC exception will not overwrite the stack40* contents of the interrupted #VC handler.41*42* The IST entry is adjusted unconditionally so that it can be also be43* unconditionally adjusted back in __sev_es_ist_exit(). 
 Otherwise a
 * nested sev_es_ist_exit() call may adjust back the IST entry too
 * early.
 *
 * The __sev_es_ist_enter() and __sev_es_ist_exit() functions always run
 * on the NMI IST stack, as they are only called from NMI handling code
 * right now.
 */
void noinstr __sev_es_ist_enter(struct pt_regs *regs)
{
	unsigned long old_ist, new_ist;

	/* Read old IST entry */
	new_ist = old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);

	/*
	 * If NMI happened while on the #VC IST stack, set the new IST
	 * value below regs->sp, so that the interrupted stack frame is
	 * not overwritten by subsequent #VC exceptions.
	 */
	if (on_vc_stack(regs))
		new_ist = regs->sp;

	/*
	 * Reserve additional 8 bytes and store old IST value so this
	 * adjustment can be unrolled in __sev_es_ist_exit().
	 */
	new_ist -= sizeof(old_ist);
	*(unsigned long *)new_ist = old_ist;

	/* Set new IST entry */
	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
}

/*
 * Undo the IST adjustment made by __sev_es_ist_enter(): pop the saved
 * old IST value stored at the current IST entry and write it back to
 * the TSS.
 *
 * If the IST entry still equals the stack top, there is no saved value
 * to restore — enter/exit calls are unbalanced — so warn and bail out
 * rather than dereference past the stack top.
 */
void noinstr __sev_es_ist_exit(void)
{
	unsigned long ist;

	/* Read IST entry */
	ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);

	if (WARN_ON(ist == __this_cpu_ist_top_va(VC)))
		return;

	/* Read back old IST entry and write it to the TSS */
	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
}

/*
 * Notify the hypervisor that NMI handling is complete by issuing a
 * SVM_VMGEXIT_NMI_COMPLETE exit through the per-CPU GHCB.
 *
 * The GHCB is acquired, wiped, and populated with the exit code and
 * zeroed exit-info fields before its physical address is written to
 * the GHCB MSR and VMGEXIT is executed; the GHCB is released afterwards.
 */
void noinstr __sev_es_nmi_complete(void)
{
	struct ghcb_state state;
	struct ghcb *ghcb;

	ghcb = __sev_get_ghcb(&state);

	/* Invalidate stale contents, then set up the NMI-complete exit */
	vc_ghcb_invalidate(ghcb);
	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
	ghcb_set_sw_exit_info_1(ghcb, 0);
	ghcb_set_sw_exit_info_2(ghcb, 0);

	/* Point the GHCB MSR at this GHCB and exit to the hypervisor */
	sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
	VMGEXIT();

	__sev_put_ghcb(&state);
}