/*
 * NOTE(extraction): web-viewer page chrome removed.  This file is
 * arch/x86/kvm/svm/vmenter.S from the Linux kernel source tree.
 */
1
/* SPDX-License-Identifier: GPL-2.0 */
2
#include <linux/linkage.h>
3
#include <asm/asm.h>
4
#include <asm/asm-offsets.h>
5
#include <asm/bitsperlong.h>
6
#include <asm/frame.h>
7
#include <asm/kvm_vcpu_regs.h>
8
#include <asm/nospec-branch.h>
9
#include "kvm-asm-offsets.h"
10
11
#define WORD_SIZE (BITS_PER_LONG / 8)
12
13
/* Intentionally omit RAX as it's context switched by hardware */
14
#define VCPU_RCX (SVM_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE)
15
#define VCPU_RDX (SVM_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE)
16
#define VCPU_RBX (SVM_vcpu_arch_regs + __VCPU_REGS_RBX * WORD_SIZE)
17
/* Intentionally omit RSP as it's context switched by hardware */
18
#define VCPU_RBP (SVM_vcpu_arch_regs + __VCPU_REGS_RBP * WORD_SIZE)
19
#define VCPU_RSI (SVM_vcpu_arch_regs + __VCPU_REGS_RSI * WORD_SIZE)
20
#define VCPU_RDI (SVM_vcpu_arch_regs + __VCPU_REGS_RDI * WORD_SIZE)
21
22
#ifdef CONFIG_X86_64
23
#define VCPU_R8 (SVM_vcpu_arch_regs + __VCPU_REGS_R8 * WORD_SIZE)
24
#define VCPU_R9 (SVM_vcpu_arch_regs + __VCPU_REGS_R9 * WORD_SIZE)
25
#define VCPU_R10 (SVM_vcpu_arch_regs + __VCPU_REGS_R10 * WORD_SIZE)
26
#define VCPU_R11 (SVM_vcpu_arch_regs + __VCPU_REGS_R11 * WORD_SIZE)
27
#define VCPU_R12 (SVM_vcpu_arch_regs + __VCPU_REGS_R12 * WORD_SIZE)
28
#define VCPU_R13 (SVM_vcpu_arch_regs + __VCPU_REGS_R13 * WORD_SIZE)
29
#define VCPU_R14 (SVM_vcpu_arch_regs + __VCPU_REGS_R14 * WORD_SIZE)
30
#define VCPU_R15 (SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
31
#endif
32
33
#define SVM_vmcb01_pa (SVM_vmcb01 + KVM_VMCB_pa)
34
35
.section .noinstr.text, "ax"
36
37
.macro RESTORE_GUEST_SPEC_CTRL
38
/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
39
ALTERNATIVE_2 "", \
40
"jmp 800f", X86_FEATURE_MSR_SPEC_CTRL, \
41
"", X86_FEATURE_V_SPEC_CTRL
42
801:
43
.endm
44
.macro RESTORE_GUEST_SPEC_CTRL_BODY
45
800:
46
/*
47
* SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the
48
* host's, write the MSR. This is kept out-of-line so that the common
49
* case does not have to jump.
50
*
51
* IMPORTANT: To avoid RSB underflow attacks and any other nastiness,
52
* there must not be any returns or indirect branches between this code
53
* and vmentry.
54
*/
55
movl SVM_spec_ctrl(%_ASM_DI), %eax
56
cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
57
je 801b
58
mov $MSR_IA32_SPEC_CTRL, %ecx
59
xor %edx, %edx
60
wrmsr
61
jmp 801b
62
.endm
63
64
.macro RESTORE_HOST_SPEC_CTRL
65
/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
66
ALTERNATIVE_2 "", \
67
"jmp 900f", X86_FEATURE_MSR_SPEC_CTRL, \
68
"", X86_FEATURE_V_SPEC_CTRL
69
901:
70
.endm
71
.macro RESTORE_HOST_SPEC_CTRL_BODY spec_ctrl_intercepted:req
72
900:
73
/* Same for after vmexit. */
74
mov $MSR_IA32_SPEC_CTRL, %ecx
75
76
/*
77
* Load the value that the guest had written into MSR_IA32_SPEC_CTRL,
78
* if it was not intercepted during guest execution.
79
*/
80
cmpb $0, \spec_ctrl_intercepted
81
jnz 998f
82
rdmsr
83
movl %eax, SVM_spec_ctrl(%_ASM_DI)
84
998:
85
86
/* Now restore the host value of the MSR if different from the guest's. */
87
movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
88
cmp SVM_spec_ctrl(%_ASM_DI), %eax
89
je 901b
90
xor %edx, %edx
91
wrmsr
92
jmp 901b
93
.endm
94
95
96
/**
97
* __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
98
* @svm: struct vcpu_svm *
99
* @spec_ctrl_intercepted: bool
100
*/
101
SYM_FUNC_START(__svm_vcpu_run)
102
push %_ASM_BP
103
mov %_ASM_SP, %_ASM_BP
104
#ifdef CONFIG_X86_64
105
push %r15
106
push %r14
107
push %r13
108
push %r12
109
#else
110
push %edi
111
push %esi
112
#endif
113
push %_ASM_BX
114
115
/*
116
* Save variables needed after vmexit on the stack, in inverse
117
* order compared to when they are needed.
118
*/
119
120
/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */
121
push %_ASM_ARG2
122
123
/* Needed to restore access to percpu variables. */
124
__ASM_SIZE(push) PER_CPU_VAR(svm_data + SD_save_area_pa)
125
126
/* Finally save @svm. */
127
push %_ASM_ARG1
128
129
.ifnc _ASM_ARG1, _ASM_DI
130
/*
131
* Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
132
* and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
133
*/
134
mov %_ASM_ARG1, %_ASM_DI
135
.endif
136
137
/* Clobbers RAX, RCX, RDX. */
138
RESTORE_GUEST_SPEC_CTRL
139
140
/*
141
* Use a single vmcb (vmcb01 because it's always valid) for
142
* context switching guest state via VMLOAD/VMSAVE, that way
143
* the state doesn't need to be copied between vmcb01 and
144
* vmcb02 when switching vmcbs for nested virtualization.
145
*/
146
mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
147
1: vmload %_ASM_AX
148
2:
149
150
/* Get svm->current_vmcb->pa into RAX. */
151
mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
152
mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
153
154
/* Load guest registers. */
155
mov VCPU_RCX(%_ASM_DI), %_ASM_CX
156
mov VCPU_RDX(%_ASM_DI), %_ASM_DX
157
mov VCPU_RBX(%_ASM_DI), %_ASM_BX
158
mov VCPU_RBP(%_ASM_DI), %_ASM_BP
159
mov VCPU_RSI(%_ASM_DI), %_ASM_SI
160
#ifdef CONFIG_X86_64
161
mov VCPU_R8 (%_ASM_DI), %r8
162
mov VCPU_R9 (%_ASM_DI), %r9
163
mov VCPU_R10(%_ASM_DI), %r10
164
mov VCPU_R11(%_ASM_DI), %r11
165
mov VCPU_R12(%_ASM_DI), %r12
166
mov VCPU_R13(%_ASM_DI), %r13
167
mov VCPU_R14(%_ASM_DI), %r14
168
mov VCPU_R15(%_ASM_DI), %r15
169
#endif
170
mov VCPU_RDI(%_ASM_DI), %_ASM_DI
171
172
/* Clobbers EFLAGS.ZF */
173
VM_CLEAR_CPU_BUFFERS
174
175
/* Enter guest mode */
176
3: vmrun %_ASM_AX
177
4:
178
/* Pop @svm to RAX while it's the only available register. */
179
pop %_ASM_AX
180
181
/* Save all guest registers. */
182
mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
183
mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
184
mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
185
mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
186
mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
187
mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
188
#ifdef CONFIG_X86_64
189
mov %r8, VCPU_R8 (%_ASM_AX)
190
mov %r9, VCPU_R9 (%_ASM_AX)
191
mov %r10, VCPU_R10(%_ASM_AX)
192
mov %r11, VCPU_R11(%_ASM_AX)
193
mov %r12, VCPU_R12(%_ASM_AX)
194
mov %r13, VCPU_R13(%_ASM_AX)
195
mov %r14, VCPU_R14(%_ASM_AX)
196
mov %r15, VCPU_R15(%_ASM_AX)
197
#endif
198
199
/* @svm can stay in RDI from now on. */
200
mov %_ASM_AX, %_ASM_DI
201
202
mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
203
5: vmsave %_ASM_AX
204
6:
205
206
/* Restores GSBASE among other things, allowing access to percpu data. */
207
pop %_ASM_AX
208
7: vmload %_ASM_AX
209
8:
210
211
/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
212
FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT
213
214
/* Clobbers RAX, RCX, RDX. */
215
RESTORE_HOST_SPEC_CTRL
216
217
/*
218
* Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
219
* untrained as soon as we exit the VM and are back to the
220
* kernel. This should be done before re-enabling interrupts
221
* because interrupt handlers won't sanitize 'ret' if the return is
222
* from the kernel.
223
*/
224
UNTRAIN_RET_VM
225
226
/*
227
* Clear all general purpose registers except RSP and RAX to prevent
228
* speculative use of the guest's values, even those that are reloaded
229
* via the stack. In theory, an L1 cache miss when restoring registers
230
* could lead to speculative execution with the guest's values.
231
* Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
232
* free. RSP and RAX are exempt as they are restored by hardware
233
* during VM-Exit.
234
*/
235
xor %ecx, %ecx
236
xor %edx, %edx
237
xor %ebx, %ebx
238
xor %ebp, %ebp
239
xor %esi, %esi
240
xor %edi, %edi
241
#ifdef CONFIG_X86_64
242
xor %r8d, %r8d
243
xor %r9d, %r9d
244
xor %r10d, %r10d
245
xor %r11d, %r11d
246
xor %r12d, %r12d
247
xor %r13d, %r13d
248
xor %r14d, %r14d
249
xor %r15d, %r15d
250
#endif
251
252
/* "Pop" @spec_ctrl_intercepted. */
253
pop %_ASM_BX
254
255
pop %_ASM_BX
256
257
#ifdef CONFIG_X86_64
258
pop %r12
259
pop %r13
260
pop %r14
261
pop %r15
262
#else
263
pop %esi
264
pop %edi
265
#endif
266
pop %_ASM_BP
267
RET
268
269
RESTORE_GUEST_SPEC_CTRL_BODY
270
RESTORE_HOST_SPEC_CTRL_BODY (%_ASM_SP)
271
272
10: cmpb $0, _ASM_RIP(kvm_rebooting)
273
jne 2b
274
ud2
275
30: cmpb $0, _ASM_RIP(kvm_rebooting)
276
jne 4b
277
ud2
278
50: cmpb $0, _ASM_RIP(kvm_rebooting)
279
jne 6b
280
ud2
281
70: cmpb $0, _ASM_RIP(kvm_rebooting)
282
jne 8b
283
ud2
284
285
_ASM_EXTABLE(1b, 10b)
286
_ASM_EXTABLE(3b, 30b)
287
_ASM_EXTABLE(5b, 50b)
288
_ASM_EXTABLE(7b, 70b)
289
290
SYM_FUNC_END(__svm_vcpu_run)
291
292
#ifdef CONFIG_KVM_AMD_SEV
293
294
295
#ifdef CONFIG_X86_64
296
#define SEV_ES_GPRS_BASE 0x300
297
#define SEV_ES_RBX (SEV_ES_GPRS_BASE + __VCPU_REGS_RBX * WORD_SIZE)
298
#define SEV_ES_RBP (SEV_ES_GPRS_BASE + __VCPU_REGS_RBP * WORD_SIZE)
299
#define SEV_ES_RSI (SEV_ES_GPRS_BASE + __VCPU_REGS_RSI * WORD_SIZE)
300
#define SEV_ES_RDI (SEV_ES_GPRS_BASE + __VCPU_REGS_RDI * WORD_SIZE)
301
#define SEV_ES_R12 (SEV_ES_GPRS_BASE + __VCPU_REGS_R12 * WORD_SIZE)
302
#define SEV_ES_R13 (SEV_ES_GPRS_BASE + __VCPU_REGS_R13 * WORD_SIZE)
303
#define SEV_ES_R14 (SEV_ES_GPRS_BASE + __VCPU_REGS_R14 * WORD_SIZE)
304
#define SEV_ES_R15 (SEV_ES_GPRS_BASE + __VCPU_REGS_R15 * WORD_SIZE)
305
#endif
306
307
/**
308
* __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
309
* @svm: struct vcpu_svm *
310
* @spec_ctrl_intercepted: bool
311
*/
312
SYM_FUNC_START(__svm_sev_es_vcpu_run)
313
FRAME_BEGIN
314
315
/*
316
* Save non-volatile (callee-saved) registers to the host save area.
317
* Except for RAX and RSP, all GPRs are restored on #VMEXIT, but not
318
* saved on VMRUN.
319
*/
320
mov %rbp, SEV_ES_RBP (%rdx)
321
mov %r15, SEV_ES_R15 (%rdx)
322
mov %r14, SEV_ES_R14 (%rdx)
323
mov %r13, SEV_ES_R13 (%rdx)
324
mov %r12, SEV_ES_R12 (%rdx)
325
mov %rbx, SEV_ES_RBX (%rdx)
326
327
/*
328
* Save volatile registers that hold arguments that are needed after
329
* #VMEXIT (RDI=@svm and RSI=@spec_ctrl_intercepted).
330
*/
331
mov %rdi, SEV_ES_RDI (%rdx)
332
mov %rsi, SEV_ES_RSI (%rdx)
333
334
/* Clobbers RAX, RCX, RDX (@hostsa). */
335
RESTORE_GUEST_SPEC_CTRL
336
337
/* Get svm->current_vmcb->pa into RAX. */
338
mov SVM_current_vmcb(%rdi), %rax
339
mov KVM_VMCB_pa(%rax), %rax
340
341
/* Clobbers EFLAGS.ZF */
342
VM_CLEAR_CPU_BUFFERS
343
344
/* Enter guest mode */
345
1: vmrun %rax
346
2:
347
/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
348
FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT
349
350
/* Clobbers RAX, RCX, RDX, consumes RDI (@svm) and RSI (@spec_ctrl_intercepted). */
351
RESTORE_HOST_SPEC_CTRL
352
353
/*
354
* Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
355
* untrained as soon as we exit the VM and are back to the
356
* kernel. This should be done before re-enabling interrupts
357
* because interrupt handlers won't sanitize RET if the return is
358
* from the kernel.
359
*/
360
UNTRAIN_RET_VM
361
362
FRAME_END
363
RET
364
365
RESTORE_GUEST_SPEC_CTRL_BODY
366
RESTORE_HOST_SPEC_CTRL_BODY %sil
367
368
3: cmpb $0, kvm_rebooting(%rip)
369
jne 2b
370
ud2
371
372
_ASM_EXTABLE(1b, 3b)
373
374
SYM_FUNC_END(__svm_sev_es_vcpu_run)
375
#endif /* CONFIG_KVM_AMD_SEV */
376
377