GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/amd64/vmm/intel/vmx_support.S
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * Copyright (c) 2013 Neel Natu <[email protected]>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <machine/asmacros.h>
#include <machine/specialreg.h>

#include "vmx_assym.h"

/* Be friendly to DTrace FBT's prologue/epilogue pattern matching */
#define VENTER  push %rbp ; mov %rsp,%rbp
#define VLEAVE  pop %rbp

/*
 * Save the guest context.
 */
#define VMX_GUEST_SAVE \
        movq %rdi,VMXCTX_GUEST_RDI(%rsp); \
        movq %rsi,VMXCTX_GUEST_RSI(%rsp); \
        movq %rdx,VMXCTX_GUEST_RDX(%rsp); \
        movq %rcx,VMXCTX_GUEST_RCX(%rsp); \
        movq %r8,VMXCTX_GUEST_R8(%rsp); \
        movq %r9,VMXCTX_GUEST_R9(%rsp); \
        movq %rax,VMXCTX_GUEST_RAX(%rsp); \
        movq %rbx,VMXCTX_GUEST_RBX(%rsp); \
        movq %rbp,VMXCTX_GUEST_RBP(%rsp); \
        movq %r10,VMXCTX_GUEST_R10(%rsp); \
        movq %r11,VMXCTX_GUEST_R11(%rsp); \
        movq %r12,VMXCTX_GUEST_R12(%rsp); \
        movq %r13,VMXCTX_GUEST_R13(%rsp); \
        movq %r14,VMXCTX_GUEST_R14(%rsp); \
        movq %r15,VMXCTX_GUEST_R15(%rsp); \
        movq %cr2,%rdi; \
        movq %rdi,VMXCTX_GUEST_CR2(%rsp); \
        movq %rsp,%rdi;
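
/*
 * Note on the %cr2 handling above: the mov-from-%cr2 form can only
 * target a general-purpose register, not memory, so %cr2 is staged
 * through %rdi; %rdi itself was saved first and is free to use as
 * scratch by that point.
 */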

/*
 * Assumes that %rdi holds a pointer to the 'vmxctx'.
 *
 * On "return" all registers are updated to reflect guest state. The two
 * exceptions are %rip and %rsp. These registers are atomically switched
 * by hardware from the guest area of the vmcs.
 *
 * We modify %rsp to point to the 'vmxctx' so we can use it to restore
 * host context in case of an error with 'vmlaunch' or 'vmresume'.
 */
#define VMX_GUEST_RESTORE \
        movq %rdi,%rsp; \
        movq VMXCTX_GUEST_CR2(%rdi),%rsi; \
        movq %rsi,%cr2; \
        movq VMXCTX_GUEST_RSI(%rdi),%rsi; \
        movq VMXCTX_GUEST_RDX(%rdi),%rdx; \
        movq VMXCTX_GUEST_RCX(%rdi),%rcx; \
        movq VMXCTX_GUEST_R8(%rdi),%r8; \
        movq VMXCTX_GUEST_R9(%rdi),%r9; \
        movq VMXCTX_GUEST_RAX(%rdi),%rax; \
        movq VMXCTX_GUEST_RBX(%rdi),%rbx; \
        movq VMXCTX_GUEST_RBP(%rdi),%rbp; \
        movq VMXCTX_GUEST_R10(%rdi),%r10; \
        movq VMXCTX_GUEST_R11(%rdi),%r11; \
        movq VMXCTX_GUEST_R12(%rdi),%r12; \
        movq VMXCTX_GUEST_R13(%rdi),%r13; \
        movq VMXCTX_GUEST_R14(%rdi),%r14; \
        movq VMXCTX_GUEST_R15(%rdi),%r15; \
        movq VMXCTX_GUEST_RDI(%rdi),%rdi; /* restore %rdi last */
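
/*
 * %rdi must be restored last because it is the base pointer for every
 * load above; once it is overwritten, the vmxctx is reachable only
 * through %rsp.
 */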

/*
 * Zero the remaining scratch registers so that any guest contents left
 * in them cannot be misused after the exit.
 */
#define VMX_GUEST_CLOBBER \
        xor %rax, %rax; \
        xor %rcx, %rcx; \
        xor %rdx, %rdx; \
        xor %rsi, %rsi; \
        xor %r8, %r8; \
        xor %r9, %r9; \
        xor %r10, %r10; \
        xor %r11, %r11;
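
/*
 * %rdi is not zeroed because by this point it holds the (host) vmxctx
 * pointer rather than guest state, and the callee-saved registers
 * (%rbx, %rbp, %r12-%r15) are not zeroed because VMX_HOST_RESTORE
 * overwrites them with host values.
 */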

/*
 * Save and restore the host context.
 *
 * Assumes that %rdi holds a pointer to the 'vmxctx'.
 */
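/*
 * (Only the SysV ABI callee-saved registers, plus %rsp, are saved:
 * vmx_enter_guest() is entered by an ordinary C call, so the
 * caller-saved registers hold no live host state.)
 */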
#define VMX_HOST_SAVE \
        movq %r15, VMXCTX_HOST_R15(%rdi); \
        movq %r14, VMXCTX_HOST_R14(%rdi); \
        movq %r13, VMXCTX_HOST_R13(%rdi); \
        movq %r12, VMXCTX_HOST_R12(%rdi); \
        movq %rbp, VMXCTX_HOST_RBP(%rdi); \
        movq %rsp, VMXCTX_HOST_RSP(%rdi); \
        movq %rbx, VMXCTX_HOST_RBX(%rdi); \

#define VMX_HOST_RESTORE \
        movq VMXCTX_HOST_R15(%rdi), %r15; \
        movq VMXCTX_HOST_R14(%rdi), %r14; \
        movq VMXCTX_HOST_R13(%rdi), %r13; \
        movq VMXCTX_HOST_R12(%rdi), %r12; \
        movq VMXCTX_HOST_RBP(%rdi), %rbp; \
        movq VMXCTX_HOST_RSP(%rdi), %rsp; \
        movq VMXCTX_HOST_RBX(%rdi), %rbx; \

/*
 * vmx_enter_guest(struct vmxctx *vmxctx, struct vmx *vmx, int launched)
 * %rdi: pointer to the 'vmxctx'
 * %rsi: pointer to the 'vmx'
 * %edx: launch state of the VMCS
 * Interrupts must be disabled on entry.
 */
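/*
 * For reference, a sketch of the assumed C-side declaration (the exact
 * prototype lives in the vmx headers; the return value is one of the
 * VMX_* status codes set below):
 *
 *      int vmx_enter_guest(struct vmxctx *vmxctx, struct vmx *vmx,
 *          int launched);
 */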
ENTRY(vmx_enter_guest)
        VENTER
        /*
         * Save host state before doing anything else.
         */
        VMX_HOST_SAVE

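/*
 * The launched flag is staged in %r8d so it survives the optional call
 * to flush_l1d_sw, the software L1D-flush routine used as an L1TF
 * mitigation when guest_l1d_flush_sw is set.
 */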
guest_restore:
        movl %edx, %r8d
        cmpb $0, guest_l1d_flush_sw(%rip)
        je after_l1d
        call flush_l1d_sw
after_l1d:
        cmpl $0, %r8d
        je do_launch
        VMX_GUEST_RESTORE
        vmresume
        /*
         * In the common case 'vmresume' returns back to the host through
         * 'vmx_exit_guest' with %rsp pointing to 'vmxctx'.
         *
         * If there is an error we return VMX_VMRESUME_ERROR to the caller.
         */
        movq %rsp, %rdi         /* point %rdi back to 'vmxctx' */
        movl $VMX_VMRESUME_ERROR, %eax
        jmp decode_inst_error

do_launch:
        VMX_GUEST_RESTORE
        vmlaunch
        /*
         * In the common case 'vmlaunch' returns back to the host through
         * 'vmx_exit_guest' with %rsp pointing to 'vmxctx'.
         *
         * If there is an error we return VMX_VMLAUNCH_ERROR to the caller.
         */
        movq %rsp, %rdi         /* point %rdi back to 'vmxctx' */
        movl $VMX_VMLAUNCH_ERROR, %eax
        /* FALLTHROUGH */
decode_inst_error:
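        /*
         * Per the Intel SDM, a failed vmlaunch/vmresume sets ZF for
         * VMfailValid (an error code is available in the VMCS) and CF
         * for VMfailInvalid; the cmovnzl below picks the matching
         * status.
         */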
        movl $VM_FAIL_VALID, %r11d
        movl $VM_FAIL_INVALID, %esi
        cmovnzl %esi, %r11d
        movl %r11d, VMXCTX_INST_FAIL_STATUS(%rdi)

        /*
         * The return value is already populated in %eax so we cannot use
         * it as a scratch register beyond this point.
         */

        VMX_HOST_RESTORE
        VLEAVE
        ret

/*
 * Non-error VM-exit from the guest. Make this a label so it can
 * be used by C code when setting up the VMCS.
 * The VMCS-restored %rsp points to the struct vmxctx.
 */
        ALIGN_TEXT
        .globl vmx_exit_guest_flush_rsb
vmx_exit_guest_flush_rsb:
        /*
         * Save guest state that is not automatically saved in the vmcs.
         */
        VMX_GUEST_SAVE

        VMX_HOST_RESTORE

        VMX_GUEST_CLOBBER

        /*
         * To prevent malicious branch target predictions from
         * affecting the host, overwrite all entries in the RSB upon
         * exiting a guest.
         */
        mov $16, %ecx           /* 16 iterations, two calls per loop */
        mov %rsp, %rax
0:      call 2f                 /* create an RSB entry. */
1:      pause
        call 1b                 /* capture rogue speculation. */
2:      call 2f                 /* create an RSB entry. */
1:      pause
        call 1b                 /* capture rogue speculation. */
2:      sub $1, %ecx
        jnz 0b
        mov %rax, %rsp
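
        /*
         * How the stuffing works: each 'call 2f' pushes a return
         * address into the RSB that points at the benign pause/call-self
         * loop, so a mispredicted 'ret' in the host can only speculate
         * into that loop.  Sixteen iterations of two calls each cover a
         * 32-entry RSB, and %rsp (saved in %rax) is restored afterwards
         * because the calls have grown the stack.
         */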

        /*
         * This will return to the caller of 'vmx_enter_guest()' with a return
         * value of VMX_GUEST_VMEXIT.
         */
        movl $VMX_GUEST_VMEXIT, %eax
        VLEAVE
        ret

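/*
 * Identical exit path minus the RSB stuffing; the C code that programs
 * the VMCS host state chooses which of the two exit labels to use,
 * presumably depending on whether the RSB-flush mitigation is needed.
 */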
        .globl vmx_exit_guest
vmx_exit_guest:
        /*
         * Save guest state that is not automatically saved in the vmcs.
         */
        VMX_GUEST_SAVE

        VMX_HOST_RESTORE

        VMX_GUEST_CLOBBER

        /*
         * This will return to the caller of 'vmx_enter_guest()' with a return
         * value of VMX_GUEST_VMEXIT.
         */
        movl $VMX_GUEST_VMEXIT, %eax
        VLEAVE
        ret
END(vmx_enter_guest)

/*
 * %rdi = interrupt handler entry point
 *
 * Calling sequence described in the "Instruction Set Reference" for the "INT"
 * instruction in Intel SDM, Vol 2.
 */
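/*
 * The pushes below hand-build the 64-bit interrupt stack frame the
 * handler expects:
 *
 *      %ss, saved %rsp, %rflags, %cs, %rip (pushed by the callq)
 *
 * so the handler's iretq pops that frame and resumes here with the
 * saved %rflags, and hence the interrupt-enable state, restored.
 */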
ENTRY(vmx_call_isr)
        VENTER
        mov %rsp, %r11          /* save %rsp */
        and $~0xf, %rsp         /* align on 16-byte boundary */
        pushq $KERNEL_SS        /* %ss */
        pushq %r11              /* %rsp */
        pushfq                  /* %rflags */
        pushq $KERNEL_CS        /* %cs */
        cli                     /* disable interrupts */
        callq *%rdi             /* push %rip and call isr */
        VLEAVE
        ret
END(vmx_call_isr)