/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/bitsperlong.h>
#include <asm/frame.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include "kvm-asm-offsets.h"

#define WORD_SIZE (BITS_PER_LONG / 8)

/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX	(SVM_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE)
#define VCPU_RDX	(SVM_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE)
#define VCPU_RBX	(SVM_vcpu_arch_regs + __VCPU_REGS_RBX * WORD_SIZE)
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	(SVM_vcpu_arch_regs + __VCPU_REGS_RBP * WORD_SIZE)
#define VCPU_RSI	(SVM_vcpu_arch_regs + __VCPU_REGS_RSI * WORD_SIZE)
#define VCPU_RDI	(SVM_vcpu_arch_regs + __VCPU_REGS_RDI * WORD_SIZE)

#ifdef CONFIG_X86_64
#define VCPU_R8		(SVM_vcpu_arch_regs + __VCPU_REGS_R8  * WORD_SIZE)
#define VCPU_R9		(SVM_vcpu_arch_regs + __VCPU_REGS_R9  * WORD_SIZE)
#define VCPU_R10	(SVM_vcpu_arch_regs + __VCPU_REGS_R10 * WORD_SIZE)
#define VCPU_R11	(SVM_vcpu_arch_regs + __VCPU_REGS_R11 * WORD_SIZE)
#define VCPU_R12	(SVM_vcpu_arch_regs + __VCPU_REGS_R12 * WORD_SIZE)
#define VCPU_R13	(SVM_vcpu_arch_regs + __VCPU_REGS_R13 * WORD_SIZE)
#define VCPU_R14	(SVM_vcpu_arch_regs + __VCPU_REGS_R14 * WORD_SIZE)
#define VCPU_R15	(SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
#endif

#define SVM_vmcb01_pa	(SVM_vmcb01 + KVM_VMCB_pa)

	.section .noinstr.text, "ax"

.macro RESTORE_GUEST_SPEC_CTRL
	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
	ALTERNATIVE_2 "", \
		"jmp 800f", X86_FEATURE_MSR_SPEC_CTRL, \
		"", X86_FEATURE_V_SPEC_CTRL
801:
.endm
.macro RESTORE_GUEST_SPEC_CTRL_BODY
800:
	/*
	 * SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the
	 * host's, write the MSR.  This is kept out-of-line so that the common
	 * case does not have to jump.
	 *
	 * IMPORTANT: To avoid RSB underflow attacks and any other nastiness,
	 * there must not be any returns or indirect branches between this code
	 * and vmentry.
	 */
	movl SVM_spec_ctrl(%_ASM_DI), %eax
	cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
	je 801b
	mov $MSR_IA32_SPEC_CTRL, %ecx
	xor %edx, %edx
	wrmsr
	jmp 801b
.endm

.macro RESTORE_HOST_SPEC_CTRL
	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
	ALTERNATIVE_2 "", \
		"jmp 900f", X86_FEATURE_MSR_SPEC_CTRL, \
		"", X86_FEATURE_V_SPEC_CTRL
901:
.endm
.macro RESTORE_HOST_SPEC_CTRL_BODY spec_ctrl_intercepted:req
900:
	/* Same for after vmexit. */
	mov $MSR_IA32_SPEC_CTRL, %ecx

	/*
	 * Load the value that the guest had written into MSR_IA32_SPEC_CTRL,
	 * if it was not intercepted during guest execution.
	 */
	cmpb $0, \spec_ctrl_intercepted
	jnz 998f
	rdmsr
	movl %eax, SVM_spec_ctrl(%_ASM_DI)
998:

	/* Now restore the host value of the MSR if different from the guest's. */
	movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
	cmp SVM_spec_ctrl(%_ASM_DI), %eax
	je 901b
	xor %edx, %edx
	wrmsr
	jmp 901b
.endm
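
/*
 * For reference, the two SPEC_CTRL paths above roughly correspond to the
 * following C logic, which only runs when the CPU has MSR_SPEC_CTRL but not
 * V_SPEC_CTRL (per the ALTERNATIVE_2 patching).  This is a sketch only, using
 * illustrative rdmsr()/wrmsr() helpers rather than the real accessors; the
 * actual sequence must stay free of returns and indirect branches between
 * here and VMRUN, which is why it lives in asm and is kept out of line:
 *
 *	// RESTORE_GUEST_SPEC_CTRL: switch to the guest value before VMRUN.
 *	if (svm->spec_ctrl != this_cpu_read(x86_spec_ctrl_current))
 *		wrmsr(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
 *
 *	// RESTORE_HOST_SPEC_CTRL: after #VMEXIT, snapshot the guest value
 *	// (unless the MSR was intercepted, in which case svm->spec_ctrl is
 *	// already current), then switch back to the host value if it differs.
 *	if (!spec_ctrl_intercepted)
 *		svm->spec_ctrl = rdmsr(MSR_IA32_SPEC_CTRL);
 *	if (this_cpu_read(x86_spec_ctrl_current) != svm->spec_ctrl)
 *		wrmsr(MSR_IA32_SPEC_CTRL, this_cpu_read(x86_spec_ctrl_current));
 */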


/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @svm:	struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 */
SYM_FUNC_START(__svm_vcpu_run)
	push %_ASM_BP
	mov %_ASM_SP, %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/*
	 * Save variables needed after vmexit on the stack, in inverse
	 * order compared to when they are needed.
	 */

	/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */
	push %_ASM_ARG2

	/* Needed to restore access to percpu variables. */
	__ASM_SIZE(push) PER_CPU_VAR(svm_data + SD_save_area_pa)

	/* Finally save @svm. */
	push %_ASM_ARG1

.ifnc _ASM_ARG1, _ASM_DI
	/*
	 * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
	 * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
	 */
	mov %_ASM_ARG1, %_ASM_DI
.endif

	/* Clobbers RAX, RCX, RDX. */
	RESTORE_GUEST_SPEC_CTRL

	/*
	 * Use a single vmcb (vmcb01 because it's always valid) for
	 * context switching guest state via VMLOAD/VMSAVE, that way
	 * the state doesn't need to be copied between vmcb01 and
	 * vmcb02 when switching vmcbs for nested virtualization.
	 */
	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
1:	vmload %_ASM_AX
2:

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX

	/* Load guest registers. */
	mov VCPU_RCX(%_ASM_DI), %_ASM_CX
	mov VCPU_RDX(%_ASM_DI), %_ASM_DX
	mov VCPU_RBX(%_ASM_DI), %_ASM_BX
	mov VCPU_RBP(%_ASM_DI), %_ASM_BP
	mov VCPU_RSI(%_ASM_DI), %_ASM_SI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_DI), %r8
	mov VCPU_R9 (%_ASM_DI), %r9
	mov VCPU_R10(%_ASM_DI), %r10
	mov VCPU_R11(%_ASM_DI), %r11
	mov VCPU_R12(%_ASM_DI), %r12
	mov VCPU_R13(%_ASM_DI), %r13
	mov VCPU_R14(%_ASM_DI), %r14
	mov VCPU_R15(%_ASM_DI), %r15
#endif
	mov VCPU_RDI(%_ASM_DI), %_ASM_DI

	/* Clobbers EFLAGS.ZF */
	VM_CLEAR_CPU_BUFFERS

	/* Enter guest mode */
3:	vmrun %_ASM_AX
4:
	/* Pop @svm to RAX while it's the only available register. */
	pop %_ASM_AX

	/* Save all guest registers. */
	mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/* @svm can stay in RDI from now on. */
	mov %_ASM_AX, %_ASM_DI

	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
5:	vmsave %_ASM_AX
6:

	/* Restores GSBASE among other things, allowing access to percpu data. */
	pop %_ASM_AX
7:	vmload %_ASM_AX
8:

	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT

	/* Clobbers RAX, RCX, RDX. */
	RESTORE_HOST_SPEC_CTRL

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize 'ret' if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET_VM

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebx, %ebx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

	/* "Pop" @spec_ctrl_intercepted. */
	pop %_ASM_BX

	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

	RESTORE_GUEST_SPEC_CTRL_BODY
	RESTORE_HOST_SPEC_CTRL_BODY (%_ASM_SP)

10:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 2b
	ud2
30:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 4b
	ud2
50:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 6b
	ud2
70:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 8b
	ud2

	_ASM_EXTABLE(1b, 10b)
	_ASM_EXTABLE(3b, 30b)
	_ASM_EXTABLE(5b, 50b)
	_ASM_EXTABLE(7b, 70b)

SYM_FUNC_END(__svm_vcpu_run)
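
/*
 * For reference: given the kerneldoc above and the %_ASM_ARG1/%_ASM_ARG2
 * usage at the top of the function, the C-side declaration is presumably
 * along the lines of (sketch, not authoritative):
 *
 *	void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
 *
 * @svm arrives in the first argument register and @spec_ctrl_intercepted in
 * the second; both are stashed on the stack above so they survive VMRUN.
 */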

#ifdef CONFIG_KVM_AMD_SEV


#ifdef CONFIG_X86_64
#define SEV_ES_GPRS_BASE 0x300
#define SEV_ES_RBX	(SEV_ES_GPRS_BASE + __VCPU_REGS_RBX * WORD_SIZE)
#define SEV_ES_RBP	(SEV_ES_GPRS_BASE + __VCPU_REGS_RBP * WORD_SIZE)
#define SEV_ES_RSI	(SEV_ES_GPRS_BASE + __VCPU_REGS_RSI * WORD_SIZE)
#define SEV_ES_RDI	(SEV_ES_GPRS_BASE + __VCPU_REGS_RDI * WORD_SIZE)
#define SEV_ES_R12	(SEV_ES_GPRS_BASE + __VCPU_REGS_R12 * WORD_SIZE)
#define SEV_ES_R13	(SEV_ES_GPRS_BASE + __VCPU_REGS_R13 * WORD_SIZE)
#define SEV_ES_R14	(SEV_ES_GPRS_BASE + __VCPU_REGS_R14 * WORD_SIZE)
#define SEV_ES_R15	(SEV_ES_GPRS_BASE + __VCPU_REGS_R15 * WORD_SIZE)
#endif
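
/*
 * Note: SEV_ES_GPRS_BASE is taken to be the offset of the GPR block within
 * the host save area that __svm_sev_es_vcpu_run() receives in %rdx below,
 * with the individual registers indexed by the same __VCPU_REGS_*
 * enumeration used for the VCPU_* offsets above.  Worked example
 * (WORD_SIZE == 8, and assuming __VCPU_REGS_R12 == 12):
 *
 *	SEV_ES_R12 = 0x300 + 12 * 8 = 0x360
 */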

/**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @svm:	struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 */
SYM_FUNC_START(__svm_sev_es_vcpu_run)
	FRAME_BEGIN

	/*
	 * Save non-volatile (callee-saved) registers to the host save area.
	 * Except for RAX and RSP, all GPRs are restored on #VMEXIT, but not
	 * saved on VMRUN.
	 */
	mov %rbp, SEV_ES_RBP (%rdx)
	mov %r15, SEV_ES_R15 (%rdx)
	mov %r14, SEV_ES_R14 (%rdx)
	mov %r13, SEV_ES_R13 (%rdx)
	mov %r12, SEV_ES_R12 (%rdx)
	mov %rbx, SEV_ES_RBX (%rdx)

	/*
	 * Save volatile registers that hold arguments that are needed after
	 * #VMEXIT (RDI=@svm and RSI=@spec_ctrl_intercepted).
	 */
	mov %rdi, SEV_ES_RDI (%rdx)
	mov %rsi, SEV_ES_RSI (%rdx)

	/* Clobbers RAX, RCX, RDX (@hostsa). */
	RESTORE_GUEST_SPEC_CTRL

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%rdi), %rax
	mov KVM_VMCB_pa(%rax), %rax

	/* Clobbers EFLAGS.ZF */
	VM_CLEAR_CPU_BUFFERS

	/* Enter guest mode */
1:	vmrun %rax
2:
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT

	/* Clobbers RAX, RCX, RDX, consumes RDI (@svm) and RSI (@spec_ctrl_intercepted). */
	RESTORE_HOST_SPEC_CTRL

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize RET if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET_VM

	FRAME_END
	RET

	RESTORE_GUEST_SPEC_CTRL_BODY
	RESTORE_HOST_SPEC_CTRL_BODY %sil

3:	cmpb $0, kvm_rebooting(%rip)
	jne 2b
	ud2

	_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_sev_es_vcpu_run)
#endif /* CONFIG_KVM_AMD_SEV */
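
/*
 * As with __svm_vcpu_run(), the implied C-side declaration is presumably:
 *
 *	void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
 *
 * The caller is expected to choose between __svm_vcpu_run() and
 * __svm_sev_es_vcpu_run() based on whether the vCPU is a SEV-ES guest: for
 * SEV-ES, guest GPR state lives in the encrypted VMSA and is saved/restored
 * by hardware, which is why this variant only spills host registers and
 * never touches the VCPU_* register slots.
 */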