/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/bitsperlong.h>
#include <asm/frame.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include "kvm-asm-offsets.h"
/* Bytes per GPR: 8 on 64-bit kernels, 4 on 32-bit (BITS_PER_LONG / 8). */
#define WORD_SIZE (BITS_PER_LONG / 8)

/*
 * Byte offsets of each guest GPR within struct vcpu_svm, built from the
 * generated constants in kvm-asm-offsets.h / asm-offsets.h.
 */
/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX (SVM_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE)
#define VCPU_RDX (SVM_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE)
#define VCPU_RBX (SVM_vcpu_arch_regs + __VCPU_REGS_RBX * WORD_SIZE)
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP (SVM_vcpu_arch_regs + __VCPU_REGS_RBP * WORD_SIZE)
#define VCPU_RSI (SVM_vcpu_arch_regs + __VCPU_REGS_RSI * WORD_SIZE)
#define VCPU_RDI (SVM_vcpu_arch_regs + __VCPU_REGS_RDI * WORD_SIZE)

/* R8-R15 only exist on 64-bit. */
#ifdef CONFIG_X86_64
#define VCPU_R8 (SVM_vcpu_arch_regs + __VCPU_REGS_R8 * WORD_SIZE)
#define VCPU_R9 (SVM_vcpu_arch_regs + __VCPU_REGS_R9 * WORD_SIZE)
#define VCPU_R10 (SVM_vcpu_arch_regs + __VCPU_REGS_R10 * WORD_SIZE)
#define VCPU_R11 (SVM_vcpu_arch_regs + __VCPU_REGS_R11 * WORD_SIZE)
#define VCPU_R12 (SVM_vcpu_arch_regs + __VCPU_REGS_R12 * WORD_SIZE)
#define VCPU_R13 (SVM_vcpu_arch_regs + __VCPU_REGS_R13 * WORD_SIZE)
#define VCPU_R14 (SVM_vcpu_arch_regs + __VCPU_REGS_R14 * WORD_SIZE)
#define VCPU_R15 (SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
#endif

/* Offset within struct vcpu_svm of vmcb01's physical address (svm->vmcb01.pa). */
#define SVM_vmcb01_pa (SVM_vmcb01 + KVM_VMCB_pa)

/*
 * VM-entry/exit code must live in the noinstr section: no tracing or other
 * instrumentation is allowed this close to guest mode.
 */
.section .noinstr.text, "ax"
/*
 * RESTORE_GUEST_SPEC_CTRL - load the guest's MSR_IA32_SPEC_CTRL before VMRUN.
 *
 * Patched via alternatives at boot: the out-of-line body at label 800
 * (see RESTORE_GUEST_SPEC_CTRL_BODY) is only reached when the CPU has
 * X86_FEATURE_MSR_SPEC_CTRL and does NOT have X86_FEATURE_V_SPEC_CTRL
 * (with V_SPEC_CTRL, hardware context switches the MSR itself).
 * The body jumps back to the local label 801 below.
 */
.macro RESTORE_GUEST_SPEC_CTRL
	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
	ALTERNATIVE_2 "", \
		"jmp 800f", X86_FEATURE_MSR_SPEC_CTRL, \
		"", X86_FEATURE_V_SPEC_CTRL
801:
.endm
/*
 * RESTORE_GUEST_SPEC_CTRL_BODY - out-of-line tail of RESTORE_GUEST_SPEC_CTRL.
 * Emitted after the function's RET; reached only via "jmp 800f" and always
 * returns to label 801.  Consumes RDI/EDI = @svm; clobbers RAX, RCX, RDX
 * (plus ESI on 32-bit).
 */
.macro RESTORE_GUEST_SPEC_CTRL_BODY
800:
	/*
	 * SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the
	 * host's, write the MSR.  This is kept out-of-line so that the common
	 * case does not have to jump.
	 *
	 * IMPORTANT: To avoid RSB underflow attacks and any other nastiness,
	 * there must not be any returns or indirect branches between this code
	 * and vmentry.
	 */
#ifdef CONFIG_X86_64
	/* Compare guest value against the per-CPU shadow of the host value. */
	mov SVM_spec_ctrl(%rdi), %rdx
	cmp PER_CPU_VAR(x86_spec_ctrl_current), %rdx
	je 801b
	/* WRMSR takes the value in EDX:EAX; split the 64-bit guest value. */
	movl %edx, %eax
	shr $32, %rdx
#else
	/* 32-bit: XOR the low and high halves separately, OR the results; ZF set iff equal. */
	mov SVM_spec_ctrl(%edi), %eax
	mov PER_CPU_VAR(x86_spec_ctrl_current), %ecx
	xor %eax, %ecx
	mov SVM_spec_ctrl + 4(%edi), %edx
	mov PER_CPU_VAR(x86_spec_ctrl_current + 4), %esi
	xor %edx, %esi
	or %esi, %ecx
	je 801b
#endif
	/* ECX selects the MSR; write the guest's value (EDX:EAX). */
	mov $MSR_IA32_SPEC_CTRL, %ecx
	wrmsr
	jmp 801b
.endm
/*
 * RESTORE_HOST_SPEC_CTRL - restore the host's MSR_IA32_SPEC_CTRL after #VMEXIT.
 *
 * Mirror image of RESTORE_GUEST_SPEC_CTRL: alternatives patch in a jump to
 * the out-of-line body at label 900 (RESTORE_HOST_SPEC_CTRL_BODY) only when
 * X86_FEATURE_MSR_SPEC_CTRL is set and X86_FEATURE_V_SPEC_CTRL is not.
 * The body jumps back to the local label 901 below.
 */
.macro RESTORE_HOST_SPEC_CTRL
	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
	ALTERNATIVE_2 "", \
		"jmp 900f", X86_FEATURE_MSR_SPEC_CTRL, \
		"", X86_FEATURE_V_SPEC_CTRL
901:
.endm
/*
 * RESTORE_HOST_SPEC_CTRL_BODY - out-of-line tail of RESTORE_HOST_SPEC_CTRL.
 *
 * @spec_ctrl_intercepted: operand (memory or byte register, per call site)
 *	holding the bool that says whether SPEC_CTRL writes were intercepted
 *	while the guest ran.  If not intercepted, the guest may have changed
 *	the MSR, so RDMSR the live value back into @svm first.
 *
 * Consumes RDI/EDI = @svm; clobbers RAX, RCX, RDX (plus ESI and EDI on
 * 32-bit).  Always returns to label 901.
 */
.macro RESTORE_HOST_SPEC_CTRL_BODY spec_ctrl_intercepted:req
900:
	/* Same for after vmexit. */
	mov $MSR_IA32_SPEC_CTRL, %ecx

	/*
	 * Load the value that the guest had written into MSR_IA32_SPEC_CTRL,
	 * if it was not intercepted during guest execution.
	 */
	cmpb $0, \spec_ctrl_intercepted
	jnz 998f
	rdmsr
	movl %eax, SVM_spec_ctrl(%_ASM_DI)
	movl %edx, SVM_spec_ctrl + 4(%_ASM_DI)
998:

	/* Now restore the host value of the MSR if different from the guest's. */
#ifdef CONFIG_X86_64
	mov PER_CPU_VAR(x86_spec_ctrl_current), %rdx
	cmp SVM_spec_ctrl(%rdi), %rdx
	je 901b
	/* WRMSR takes EDX:EAX; split the 64-bit host value. */
	movl %edx, %eax
	shr $32, %rdx
#else
	/* 32-bit: compare both halves; note this destroys EDI (@svm). */
	mov PER_CPU_VAR(x86_spec_ctrl_current), %eax
	mov SVM_spec_ctrl(%edi), %esi
	xor %eax, %esi
	mov PER_CPU_VAR(x86_spec_ctrl_current + 4), %edx
	mov SVM_spec_ctrl + 4(%edi), %edi
	xor %edx, %edi
	or %edi, %esi
	je 901b
#endif
	wrmsr
	jmp 901b
.endm
/*
 * Clear CPU buffers before entering the guest, patched in by alternatives
 * only when X86_FEATURE_CLEAR_CPU_BUF_VM is set.  __CLEAR_CPU_BUFFERS comes
 * from <asm/nospec-branch.h> (presumably the VERW-based sequence — confirm
 * there).  Callers note this clobbers EFLAGS.ZF.
 */
#define SVM_CLEAR_CPU_BUFFERS \
	ALTERNATIVE "", __CLEAR_CPU_BUFFERS, X86_FEATURE_CLEAR_CPU_BUF_VM
/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @svm: struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 */
SYM_FUNC_START(__svm_vcpu_run)
	/* Standard frame plus all callee-saved GPRs for the active ABI. */
	push %_ASM_BP
	mov %_ASM_SP, %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/*
	 * Save variables needed after vmexit on the stack, in inverse
	 * order compared to when they are needed.
	 */

	/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */
	push %_ASM_ARG2

	/* Needed to restore access to percpu variables. */
	__ASM_SIZE(push) PER_CPU_VAR(svm_data + SD_save_area_pa)

	/* Finally save @svm. */
	push %_ASM_ARG1

.ifnc _ASM_ARG1, _ASM_DI
	/*
	 * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
	 * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
	 */
	mov %_ASM_ARG1, %_ASM_DI
.endif

	/* Clobbers RAX, RCX, RDX (and ESI on 32-bit), consumes RDI (@svm). */
	RESTORE_GUEST_SPEC_CTRL

	/*
	 * Use a single vmcb (vmcb01 because it's always valid) for
	 * context switching guest state via VMLOAD/VMSAVE, that way
	 * the state doesn't need to be copied between vmcb01 and
	 * vmcb02 when switching vmcbs for nested virtualization.
	 */
	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
1:	vmload %_ASM_AX
2:

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX

	/* Load guest registers.  RDI (@svm) must go last — it is the base. */
	mov VCPU_RCX(%_ASM_DI), %_ASM_CX
	mov VCPU_RDX(%_ASM_DI), %_ASM_DX
	mov VCPU_RBX(%_ASM_DI), %_ASM_BX
	mov VCPU_RBP(%_ASM_DI), %_ASM_BP
	mov VCPU_RSI(%_ASM_DI), %_ASM_SI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_DI), %r8
	mov VCPU_R9 (%_ASM_DI), %r9
	mov VCPU_R10(%_ASM_DI), %r10
	mov VCPU_R11(%_ASM_DI), %r11
	mov VCPU_R12(%_ASM_DI), %r12
	mov VCPU_R13(%_ASM_DI), %r13
	mov VCPU_R14(%_ASM_DI), %r14
	mov VCPU_R15(%_ASM_DI), %r15
#endif
	mov VCPU_RDI(%_ASM_DI), %_ASM_DI

	/* Clobbers EFLAGS.ZF */
	SVM_CLEAR_CPU_BUFFERS

	/* Enter guest mode; RAX = current vmcb PA.  Resumes at 4 on #VMEXIT. */
3:	vmrun %_ASM_AX
4:
	/* Pop @svm to RAX while it's the only available register. */
	pop %_ASM_AX

	/* Save all guest registers. */
	mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8, VCPU_R8 (%_ASM_AX)
	mov %r9, VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/* @svm can stay in RDI from now on. */
	mov %_ASM_AX, %_ASM_DI

	/* Stash guest state back into vmcb01 (counterpart of vmload at 1:). */
	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
5:	vmsave %_ASM_AX
6:

	/* Restores GSBASE among other things, allowing access to percpu data. */
	pop %_ASM_AX
7:	vmload %_ASM_AX
8:

	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT

	/*
	 * Clobbers RAX, RCX, RDX (and ESI, EDI on 32-bit), consumes RDI (@svm)
	 * and RSP (pointer to @spec_ctrl_intercepted).
	 */
	RESTORE_HOST_SPEC_CTRL

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize 'ret' if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET_VM

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack. In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free. RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebx, %ebx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d, %r8d
	xor %r9d, %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

	/* "Pop" @spec_ctrl_intercepted.  (Discarded; BX is reloaded next.) */
	pop %_ASM_BX

	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

	/* Out-of-line SPEC_CTRL bodies; spec_ctrl_intercepted is at top of stack. */
	RESTORE_GUEST_SPEC_CTRL_BODY
	RESTORE_HOST_SPEC_CTRL_BODY (%_ASM_SP)

	/*
	 * Fault handlers for vmload (1)/vmrun (3)/vmsave (5)/vmload (7):
	 * faults are tolerated while kvm_rebooting, otherwise panic via ud2.
	 */
10:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 2b
	ud2
30:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 4b
	ud2
50:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 6b
	ud2
70:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 8b
	ud2

	_ASM_EXTABLE(1b, 10b)
	_ASM_EXTABLE(3b, 30b)
	_ASM_EXTABLE(5b, 50b)
	_ASM_EXTABLE(7b, 70b)

SYM_FUNC_END(__svm_vcpu_run)
#ifdef CONFIG_KVM_AMD_SEV


#ifdef CONFIG_X86_64
/*
 * Byte offset of the GPR array within the host save area page; the value
 * presumably matches the VMSA layout in the AMD APM — confirm against
 * struct sev_es_save_area.
 */
#define SEV_ES_GPRS_BASE 0x300
#define SEV_ES_RBX (SEV_ES_GPRS_BASE + __VCPU_REGS_RBX * WORD_SIZE)
#define SEV_ES_RBP (SEV_ES_GPRS_BASE + __VCPU_REGS_RBP * WORD_SIZE)
#define SEV_ES_RSI (SEV_ES_GPRS_BASE + __VCPU_REGS_RSI * WORD_SIZE)
#define SEV_ES_RDI (SEV_ES_GPRS_BASE + __VCPU_REGS_RDI * WORD_SIZE)
#define SEV_ES_R12 (SEV_ES_GPRS_BASE + __VCPU_REGS_R12 * WORD_SIZE)
#define SEV_ES_R13 (SEV_ES_GPRS_BASE + __VCPU_REGS_R13 * WORD_SIZE)
#define SEV_ES_R14 (SEV_ES_GPRS_BASE + __VCPU_REGS_R14 * WORD_SIZE)
#define SEV_ES_R15 (SEV_ES_GPRS_BASE + __VCPU_REGS_R15 * WORD_SIZE)
#endif

/**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @svm: struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 *
 * NOTE(review): the body stores through RDX as the host-save-area base
 * (SEV_ES_*(%rdx)) and a clobber comment below calls it "@hostsa", i.e.
 * there appears to be an undocumented third argument in RDX — confirm
 * against the C caller's prototype.
 */
SYM_FUNC_START(__svm_sev_es_vcpu_run)
	FRAME_BEGIN

	/*
	 * Save non-volatile (callee-saved) registers to the host save area.
	 * Except for RAX and RSP, all GPRs are restored on #VMEXIT, but not
	 * saved on VMRUN.
	 */
	mov %rbp, SEV_ES_RBP (%rdx)
	mov %r15, SEV_ES_R15 (%rdx)
	mov %r14, SEV_ES_R14 (%rdx)
	mov %r13, SEV_ES_R13 (%rdx)
	mov %r12, SEV_ES_R12 (%rdx)
	mov %rbx, SEV_ES_RBX (%rdx)

	/*
	 * Save volatile registers that hold arguments that are needed after
	 * #VMEXIT (RDI=@svm and RSI=@spec_ctrl_intercepted).
	 */
	mov %rdi, SEV_ES_RDI (%rdx)
	mov %rsi, SEV_ES_RSI (%rdx)

	/* Clobbers RAX, RCX, and RDX (@hostsa), consumes RDI (@svm). */
	RESTORE_GUEST_SPEC_CTRL

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%rdi), %rax
	mov KVM_VMCB_pa(%rax), %rax

	/* Clobbers EFLAGS.ZF */
	SVM_CLEAR_CPU_BUFFERS

	/* Enter guest mode; hardware restores GPRs from the save area on #VMEXIT. */
1:	vmrun %rax
2:
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT

	/* Clobbers RAX, RCX, RDX, consumes RDI (@svm) and RSI (@spec_ctrl_intercepted). */
	RESTORE_HOST_SPEC_CTRL

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize RET if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET_VM

	FRAME_END
	RET

	/* Out-of-line SPEC_CTRL bodies; @spec_ctrl_intercepted lives in SIL here. */
	RESTORE_GUEST_SPEC_CTRL_BODY
	RESTORE_HOST_SPEC_CTRL_BODY %sil

	/* vmrun fault handler: tolerated while kvm_rebooting, else panic. */
3:	cmpb $0, kvm_rebooting(%rip)
	jne 2b
	ud2

	_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_sev_es_vcpu_run)
#endif /* CONFIG_KVM_AMD_SEV */