/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * Copyright (c) 2013 Neel Natu <[email protected]>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <machine/asmacros.h>
#include <machine/specialreg.h>

#include "vmx_assym.h"

/* Be friendly to DTrace FBT's prologue/epilogue pattern matching */
#define VENTER  push %rbp ; mov %rsp,%rbp
#define VLEAVE  pop %rbp

/*
 * Save the guest context.
 */
#define VMX_GUEST_SAVE \
        movq    %rdi,VMXCTX_GUEST_RDI(%rsp); \
        movq    %rsi,VMXCTX_GUEST_RSI(%rsp); \
        movq    %rdx,VMXCTX_GUEST_RDX(%rsp); \
        movq    %rcx,VMXCTX_GUEST_RCX(%rsp); \
        movq    %r8,VMXCTX_GUEST_R8(%rsp); \
        movq    %r9,VMXCTX_GUEST_R9(%rsp); \
        movq    %rax,VMXCTX_GUEST_RAX(%rsp); \
        movq    %rbx,VMXCTX_GUEST_RBX(%rsp); \
        movq    %rbp,VMXCTX_GUEST_RBP(%rsp); \
        movq    %r10,VMXCTX_GUEST_R10(%rsp); \
        movq    %r11,VMXCTX_GUEST_R11(%rsp); \
        movq    %r12,VMXCTX_GUEST_R12(%rsp); \
        movq    %r13,VMXCTX_GUEST_R13(%rsp); \
        movq    %r14,VMXCTX_GUEST_R14(%rsp); \
        movq    %r15,VMXCTX_GUEST_R15(%rsp); \
        movq    %cr2,%rdi; \
        movq    %rdi,VMXCTX_GUEST_CR2(%rsp); \
        movq    %rsp,%rdi;
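
/*
 * Note that VMX_GUEST_SAVE addresses the 'vmxctx' through %rsp: it is only
 * used at the VM-exit entry points below, where the VMCS-restored %rsp
 * points to the 'vmxctx'. The trailing 'movq %rsp,%rdi' leaves %rdi pointing
 * at the 'vmxctx' for the VMX_HOST_RESTORE that follows.
 */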

/*
 * Assumes that %rdi holds a pointer to the 'vmxctx'.
 *
 * On "return" all registers are updated to reflect guest state. The two
 * exceptions are %rip and %rsp. These registers are atomically switched
 * by hardware from the guest area of the vmcs.
 *
 * We modify %rsp to point to the 'vmxctx' so we can use it to restore
 * host context in case of an error with 'vmlaunch' or 'vmresume'.
 */
#define VMX_GUEST_RESTORE \
        movq    %rdi,%rsp; \
        movq    VMXCTX_GUEST_CR2(%rdi),%rsi; \
        movq    %rsi,%cr2; \
        movq    VMXCTX_GUEST_RSI(%rdi),%rsi; \
        movq    VMXCTX_GUEST_RDX(%rdi),%rdx; \
        movq    VMXCTX_GUEST_RCX(%rdi),%rcx; \
        movq    VMXCTX_GUEST_R8(%rdi),%r8; \
        movq    VMXCTX_GUEST_R9(%rdi),%r9; \
        movq    VMXCTX_GUEST_RAX(%rdi),%rax; \
        movq    VMXCTX_GUEST_RBX(%rdi),%rbx; \
        movq    VMXCTX_GUEST_RBP(%rdi),%rbp; \
        movq    VMXCTX_GUEST_R10(%rdi),%r10; \
        movq    VMXCTX_GUEST_R11(%rdi),%r11; \
        movq    VMXCTX_GUEST_R12(%rdi),%r12; \
        movq    VMXCTX_GUEST_R13(%rdi),%r13; \
        movq    VMXCTX_GUEST_R14(%rdi),%r14; \
        movq    VMXCTX_GUEST_R15(%rdi),%r15; \
        movq    VMXCTX_GUEST_RDI(%rdi),%rdi; /* restore %rdi last */

/*
 * Clobber the remaining registers with guest contents so they can't
 * be misused.
 */
#define VMX_GUEST_CLOBBER \
        xor     %rax, %rax; \
        xor     %rcx, %rcx; \
        xor     %rdx, %rdx; \
        xor     %rsi, %rsi; \
        xor     %r8, %r8; \
        xor     %r9, %r9; \
        xor     %r10, %r10; \
        xor     %r11, %r11;

/*
 * Save and restore the host context.
 *
 * Assumes that %rdi holds a pointer to the 'vmxctx'.
 */
#define VMX_HOST_SAVE \
        movq    %r15, VMXCTX_HOST_R15(%rdi); \
        movq    %r14, VMXCTX_HOST_R14(%rdi); \
        movq    %r13, VMXCTX_HOST_R13(%rdi); \
        movq    %r12, VMXCTX_HOST_R12(%rdi); \
        movq    %rbp, VMXCTX_HOST_RBP(%rdi); \
        movq    %rsp, VMXCTX_HOST_RSP(%rdi); \
        movq    %rbx, VMXCTX_HOST_RBX(%rdi);

#define VMX_HOST_RESTORE \
        movq    VMXCTX_HOST_R15(%rdi), %r15; \
        movq    VMXCTX_HOST_R14(%rdi), %r14; \
        movq    VMXCTX_HOST_R13(%rdi), %r13; \
        movq    VMXCTX_HOST_R12(%rdi), %r12; \
        movq    VMXCTX_HOST_RBP(%rdi), %rbp; \
        movq    VMXCTX_HOST_RSP(%rdi), %rsp; \
        movq    VMXCTX_HOST_RBX(%rdi), %rbx;

/*
 * vmx_enter_guest(struct vmxctx *vmxctx, struct vmx *vmx, int launched)
 * %rdi: pointer to the 'vmxctx'
 * %rsi: pointer to the 'vmx'
 * %edx: launch state of the VMCS
 * Interrupts must be disabled on entry.
 */
ENTRY(vmx_enter_guest)
        VENTER
        /*
         * Save host state before doing anything else.
         */
        VMX_HOST_SAVE

guest_restore:
        movl    %edx, %r8d
        cmpb    $0, guest_l1d_flush_sw(%rip)
        je      after_l1d
        call    flush_l1d_sw
after_l1d:
        cmpl    $0, %r8d
        je      do_launch
        VMX_GUEST_RESTORE
        vmresume
        /*
         * In the common case 'vmresume' returns back to the host through
         * 'vmx_exit_guest' with %rsp pointing to 'vmxctx'.
         *
         * If there is an error we return VMX_VMRESUME_ERROR to the caller.
         */
        movq    %rsp, %rdi              /* point %rdi back to 'vmxctx' */
        movl    $VMX_VMRESUME_ERROR, %eax
        jmp     decode_inst_error

do_launch:
        VMX_GUEST_RESTORE
        vmlaunch
        /*
         * In the common case 'vmlaunch' returns back to the host through
         * 'vmx_exit_guest' with %rsp pointing to 'vmxctx'.
         *
         * If there is an error we return VMX_VMLAUNCH_ERROR to the caller.
         */
        movq    %rsp, %rdi              /* point %rdi back to 'vmxctx' */
        movl    $VMX_VMLAUNCH_ERROR, %eax
        /* FALLTHROUGH */
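        /*
         * A failed 'vmlaunch' or 'vmresume' reports VMfailValid (ZF set,
         * with an error number left in the VM-instruction error field of
         * the VMCS) or VMfailInvalid (ZF clear), per the VM-instruction
         * error conventions in the Intel SDM. The mov instructions above
         * do not modify %rflags, so the conditional move below can still
         * distinguish the two cases.
         */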
decode_inst_error:
        movl    $VM_FAIL_VALID, %r11d
        movl    $VM_FAIL_INVALID, %esi
        cmovnzl %esi, %r11d
        movl    %r11d, VMXCTX_INST_FAIL_STATUS(%rdi)

        /*
         * The return value is already populated in %eax so we cannot use
         * it as a scratch register beyond this point.
         */

        VMX_HOST_RESTORE
        VLEAVE
        ret

/*
 * Non-error VM-exit from the guest. Make this a label so it can
 * be used by C code when setting up the VMCS.
 * The VMCS-restored %rsp points to the struct vmxctx.
 */
        ALIGN_TEXT
        .globl  vmx_exit_guest_flush_rsb
vmx_exit_guest_flush_rsb:
        /*
         * Save guest state that is not automatically saved in the vmcs.
         */
        VMX_GUEST_SAVE

        VMX_HOST_RESTORE

        VMX_GUEST_CLOBBER

        /*
         * To prevent malicious branch target predictions from
         * affecting the host, overwrite all entries in the RSB upon
         * exiting a guest.
         */
        mov     $16, %ecx       /* 16 iterations, two calls per loop */
        mov     %rsp, %rax
0:      call    2f              /* create an RSB entry. */
1:      pause
        call    1b              /* capture rogue speculation. */
2:      call    2f              /* create an RSB entry. */
1:      pause
        call    1b              /* capture rogue speculation. */
2:      sub     $1, %ecx
        jnz     0b
        mov     %rax, %rsp

        /*
         * This will return to the caller of 'vmx_enter_guest()' with a return
         * value of VMX_GUEST_VMEXIT.
         */
        movl    $VMX_GUEST_VMEXIT, %eax
        VLEAVE
        ret

        .globl  vmx_exit_guest
vmx_exit_guest:
        /*
         * Save guest state that is not automatically saved in the vmcs.
         */
        VMX_GUEST_SAVE

        VMX_HOST_RESTORE

        VMX_GUEST_CLOBBER

        /*
         * This will return to the caller of 'vmx_enter_guest()' with a return
         * value of VMX_GUEST_VMEXIT.
         */
        movl    $VMX_GUEST_VMEXIT, %eax
        VLEAVE
        ret
END(vmx_enter_guest)

/*
 * %rdi = interrupt handler entry point
 *
 * Calling sequence described in the "Instruction Set Reference" for the "INT"
 * instruction in Intel SDM, Vol 2.
 */
ENTRY(vmx_call_isr)
        VENTER
        mov     %rsp, %r11              /* save %rsp */
        and     $~0xf, %rsp             /* align on 16-byte boundary */
        pushq   $KERNEL_SS              /* %ss */
        pushq   %r11                    /* %rsp */
        pushfq                          /* %rflags */
        pushq   $KERNEL_CS              /* %cs */
        cli                             /* disable interrupts */
        callq   *%rdi                   /* push %rip and call isr */
        VLEAVE
        ret
END(vmx_call_isr)
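
/*
 * For orientation only: a rough, hypothetical sketch of how a C caller might
 * consume the return values set up above. The real callers live on the C
 * side of the VMX code; the field name 'inst_fail_status' below is assumed
 * for illustration (this file only uses the assym offset
 * VMXCTX_INST_FAIL_STATUS) and is not defined here.
 *
 *	int rc;
 *
 *	rc = vmx_enter_guest(vmxctx, vmx, launched);
 *	switch (rc) {
 *	case VMX_GUEST_VMEXIT:
 *		// Normal VM-exit; the exit reason is read from the VMCS.
 *		break;
 *	case VMX_VMLAUNCH_ERROR:
 *	case VMX_VMRESUME_ERROR:
 *		// vmxctx->inst_fail_status now holds VM_FAIL_VALID or
 *		// VM_FAIL_INVALID, as stored by decode_inst_error.
 *		break;
 *	}
 */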