GitHub Repository: torvalds/linux
Path: blob/master/arch/arm64/kvm/hyp/entry.S

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mte.h>
#include <asm/kvm_ptrauth.h>

	.text

/*
 * u64 __guest_enter(struct kvm_vcpu *vcpu);
 */
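// Returns an ARM_EXCEPTION_* exit code in x0 (set by __guest_exit below),
// possibly with the ARM_EXIT_WITH_SERROR_BIT flag merged in.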
SYM_FUNC_START(__guest_enter)
	// x0: vcpu
	// x1-x17: clobbered by macros
	// x29: guest context

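	// Get a pointer to this CPU's hyp context (kvm_hyp_ctxt); x2 is a
	// scratch register for the address calculation.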
	adr_this_cpu x1, kvm_hyp_ctxt, x2

	// Store the hyp regs
	save_callee_saved_regs x1

	// Save hyp's sp_el0
	save_sp_el0	x1, x2

	// Now the hyp state is stored. If we have a pending RAS SError, it
	// must affect the host or hyp. If any asynchronous exception is
	// pending we defer the guest entry. The DSB isn't necessary before
	// v8.2 as any SError would be fatal.
alternative_if ARM64_HAS_RAS_EXTN
	dsb	nshst
	isb
alternative_else_nop_endif
	mrs	x1, isr_el1
	cbz	x1, 1f

	// Ensure that __guest_enter() always provides a context
	// synchronization event so that callers don't need ISBs for anything
	// that would usually be synchronized by the ERET.
	isb
	mov	x0, #ARM_EXCEPTION_IRQ
	ret

1:
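	// Record x0 (the vcpu) as this CPU's loaded vCPU; x1 and x2 are
	// scratch. Cleared again with xzr on the exit path below.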
	set_loaded_vcpu x0, x1, x2

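	// x29 now points at the guest's register context (vcpu + VCPU_CONTEXT)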
	add	x29, x0, #VCPU_CONTEXT

	// mte_switch_to_guest(g_ctxt, h_ctxt, tmp1)
	mte_switch_to_guest x29, x1, x2

	// Macro ptrauth_switch_to_guest format:
	//	ptrauth_switch_to_guest(guest cxt, tmp1, tmp2, tmp3)
	// The below macro to restore guest keys is not implemented in C code
	// as it may cause Pointer Authentication key signing mismatch errors
	// when this feature is enabled for kernel code.
	ptrauth_switch_to_guest x29, x0, x1, x2

	// Restore the guest's sp_el0
	restore_sp_el0 x29, x0

	// Restore guest regs x0-x17
	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x29, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]

	// Restore guest regs x18-x29, lr
	restore_callee_saved_regs x29

	// Do not touch any register after this!
	eret
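	// Speculation barrier: prevent straight-line speculation past the ERET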
	sb

SYM_INNER_LABEL(__guest_exit_restore_elr_and_panic, SYM_L_GLOBAL)
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack

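	// Put the hyp ELR_EL2 value saved in the hyp context back into
	// ELR_EL2, then fall through to __guest_exit_panic.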
	adr_this_cpu x0, kvm_hyp_ctxt, x1
	ldr	x0, [x0, #CPU_ELR_EL2]
	msr	elr_el2, x0

SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack

	// If the hyp context is loaded, go straight to hyp_panic
	get_loaded_vcpu x0, x1
	cbnz	x0, 1f
	b	hyp_panic

1:
	// The hyp context is saved so make sure it is restored to allow
	// hyp_panic to run at hyp and, subsequently, panic to run in the host.
	// This makes use of __guest_exit to avoid duplication but sets the
	// return address to tail call into hyp_panic. As a side effect, the
	// current state is saved to the guest context but it will only be
	// accurate if the guest had been completely restored.
	adr_this_cpu x0, kvm_hyp_ctxt, x1
	adr_l	x1, hyp_panic
	str	x1, [x0, #CPU_XREG_OFFSET(30)]

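	// Load the vcpu pointer back into x1 (x0 is scratch) and fall
	// through to __guest_exit.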
	get_vcpu_ptr x1, x0

SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
	// x0: return code
	// x1: vcpu
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack

	add	x1, x1, #VCPU_CONTEXT

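	// Set PSTATE.PAN again now that we run in hyp, when the CPU has
	// ARM64_HAS_PAN and CONFIG_ARM64_PAN is enabled.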
	ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)

	// Store the guest regs x2 and x3
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(2)]

	// Retrieve the guest regs x0-x1 from the stack
	ldp	x2, x3, [sp], #16	// x0, x1

	// Store the guest regs x0-x1 and x4-x17
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x1, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x1, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x1, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x1, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x1, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x1, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x1, #CPU_XREG_OFFSET(16)]

	// Store the guest regs x18-x29, lr
	save_callee_saved_regs x1

	// Store the guest's sp_el0
	save_sp_el0	x1, x2

	adr_this_cpu x2, kvm_hyp_ctxt, x3

	// Macro ptrauth_switch_to_hyp format:
	//	ptrauth_switch_to_hyp(guest cxt, host cxt, tmp1, tmp2, tmp3)
	// The below macro to save/restore keys is not implemented in C code
	// as it may cause Pointer Authentication key signing mismatch errors
	// when this feature is enabled for kernel code.
	ptrauth_switch_to_hyp x1, x2, x3, x4, x5

	// mte_switch_to_hyp(g_ctxt, h_ctxt, reg1)
	mte_switch_to_hyp x1, x2, x3

	// Restore hyp's sp_el0
	restore_sp_el0 x2, x3

	// Now restore the hyp regs
	restore_callee_saved_regs x2

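	// Clear the loaded-vCPU pointer: the guest is no longer in context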
	set_loaded_vcpu xzr, x2, x3

alternative_if ARM64_HAS_RAS_EXTN
	// If we have the RAS extensions we can consume a pending error
	// without an unmask-SError and isb. The ESB-instruction consumed any
	// pending guest error when we took the exception from the guest.
	mrs_s	x2, SYS_DISR_EL1
	str	x2, [x1, #(VCPU_FAULT_DISR - VCPU_CONTEXT)]
	cbz	x2, 1f
	msr_s	SYS_DISR_EL1, xzr
	orr	x0, x0, #(1<<ARM_EXIT_WITH_SERROR_BIT)
1:	ret
alternative_else
	dsb	sy		// Synchronize against in-flight ld/st
	isb			// Prevent an early read of side-effect free ISR
	mrs	x2, isr_el1
	tbnz	x2, #ISR_EL1_A_SHIFT, 2f
	ret
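	// The nop pads this sequence to the same length as the RAS path above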
	nop
2:
alternative_endif
	// We know we have a pending asynchronous abort, now is the
	// time to flush it out. From your VAXorcist book, page 666:
	// "Threaten me not, oh Evil one! For I speak with
	// the power of DEC, and I command thee to show thyself!"
	mrs	x2, elr_el2
	mrs	x3, esr_el2
	mrs	x4, spsr_el2
	mov	x5, x0
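	// (The SError taken below will overwrite ELR_EL2, ESR_EL2 and
	// SPSR_EL2, so stash them, and the exit code in x5, first.)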

	msr	daifclr, #4	// Unmask aborts

	// This is our single instruction exception window. A pending
	// SError is guaranteed to occur at the earliest when we unmask
	// it, and at the latest just after the ISB.
abort_guest_exit_start:

	isb

abort_guest_exit_end:

	msr	daifset, #4	// Mask aborts
	ret

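	// An SError taken inside the window above is redirected by the hyp
	// exception fixup table to the 9997 handler below.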
	_kvm_extable	abort_guest_exit_start, 9997f
	_kvm_extable	abort_guest_exit_end, 9997f
9997:
	msr	daifset, #4	// Mask aborts
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)

	// Restore the exception context, which the SError we just consumed
	// has overwritten, so that we can report some information.
	// Merge the exception code with the SError pending bit.
	msr	elr_el2, x2
	msr	esr_el2, x3
	msr	spsr_el2, x4
	orr	x0, x0, x5
1:	ret
SYM_FUNC_END(__guest_enter)