GitHub Repository: torvalds/linux
Path: arch/arm64/kvm/hyp/nvhe/host.S (branch: master)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020 - Google Inc
 * Author: Andrew Scull <[email protected]>
 */

#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_ptrauth.h>

	.text

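/*
 * __host_exit - save the host's register state and hand the exception over
 * to the C code.
 *
 * Entered from host_el1_sync_vect below, with the host's x0 and x1 already
 * stashed on the stack. The host's x0-x29 and lr are saved into its
 * kvm_cpu_context, the ptrauth keys are switched if required, and
 * handle_trap is called to deal with the exception.
 */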
SYM_FUNC_START(__host_exit)
	get_host_ctxt	x0, x1

	/* Store the host regs x2 and x3 */
	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(2)]

	/* Retrieve the host regs x0-x1 from the stack */
	ldp	x2, x3, [sp], #16	// x0, x1

	/* Store the host regs x0-x1 and x4-x17 */
	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x0, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x0, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x0, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x0, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x0, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x0, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x0, #CPU_XREG_OFFSET(16)]

	/* Store the host regs x18-x29, lr */
	save_callee_saved_regs x0

	/* Save the host context pointer in x29 across the function call */
	mov	x29, x0

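	/*
	 * With address authentication in use and KVM running in protected
	 * mode, stash the host's ptrauth keys in the host context (starting
	 * at CPU_APIAKEYLO_EL1) and switch to hyp's own keys, taken from
	 * kvm_hyp_ctxt, before any C code runs at EL2.
	 */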
#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
alternative_if_not ARM64_HAS_ADDRESS_AUTH
	b	__skip_pauth_save
alternative_else_nop_endif

alternative_if ARM64_KVM_PROTECTED_MODE
	/* Save kernel ptrauth keys. */
	add	x18, x29, #CPU_APIAKEYLO_EL1
	ptrauth_save_state	x18, x19, x20

	/* Use hyp keys. */
	adr_this_cpu	x18, kvm_hyp_ctxt, x19
	add	x18, x18, #CPU_APIAKEYLO_EL1
	ptrauth_restore_state	x18, x19, x20
	isb
alternative_else_nop_endif
__skip_pauth_save:
#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */

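	/*
	 * x0 still points at the host context here; x29 keeps a second copy
	 * of it across the call below, since x29 is callee-saved.
	 */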
	bl	handle_trap

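/*
 * Restore the host state and return to it. Reached from __host_exit once
 * handle_trap returns, and from __host_enter below; in both cases x29 points
 * at the host context to restore.
 */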
__host_enter_restore_full:
	/* Restore kernel keys. */
#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
alternative_if_not ARM64_HAS_ADDRESS_AUTH
	b	__skip_pauth_restore
alternative_else_nop_endif

alternative_if ARM64_KVM_PROTECTED_MODE
	add	x18, x29, #CPU_APIAKEYLO_EL1
	ptrauth_restore_state	x18, x19, x20
alternative_else_nop_endif
__skip_pauth_restore:
#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */

	/* Restore host regs x0-x17 */
	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x29, #CPU_XREG_OFFSET(6)]

	/* x0-7 are used for panic arguments */
__host_enter_for_panic:
	ldp	x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]

	/* Restore host regs x18-x29, lr */
	restore_callee_saved_regs x29

	/* Do not touch any register after this! */
__host_enter_without_restoring:
	eret
	sb
SYM_FUNC_END(__host_exit)

/*
 * void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
 */
SYM_FUNC_START(__host_enter)
	mov	x29, x0
	b	__host_enter_restore_full
SYM_FUNC_END(__host_enter)

/*
 * void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
 *				  u64 elr, u64 par);
 */
SYM_FUNC_START(__hyp_do_panic)
	/* Prepare and exit to the host's panic function. */
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	adr_l	lr, nvhe_hyp_panic_handler
	hyp_kimg_va lr, x6
	msr	elr_el2, lr

	mov	x29, x0

#ifdef CONFIG_NVHE_EL2_DEBUG
	/* Ensure host stage-2 is disabled */
	mrs	x0, hcr_el2
	bic	x0, x0, #HCR_VM
	msr_hcr_el2 x0
	isb
	tlbi	vmalls12e1
	dsb	nsh
#endif

	/* Load the panic arguments into x0-7 */
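	/*
	 * The register layout set up below is assumed to match
	 * nvhe_hyp_panic_handler()'s C prototype:
	 *   x0 = ESR_EL2, x1 = spsr, x2 = elr, x3 = physical address of elr,
	 *   x4 = par, x5 = vcpu pointer, x6 = FAR_EL2, x7 = HPFAR_EL2.
	 */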
	mrs	x0, esr_el2
	mov	x4, x3
	mov	x3, x2
	hyp_pa	x3, x6
	get_vcpu_ptr x5, x6
	mrs	x6, far_el2
	mrs	x7, hpfar_el2

	/* Enter the host, conditionally restoring the host context. */
	cbz	x29, __host_enter_without_restoring
	b	__host_enter_for_panic
SYM_FUNC_END(__hyp_do_panic)

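/*
 * Handle an HVC issued by the host. In protected mode every HVC is treated
 * as a hypercall and handed to __host_exit; otherwise, call numbers below
 * HVC_STUB_HCALL_NR are forwarded to the stub handler at its physical
 * (idmap) address.
 */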
SYM_FUNC_START(__host_hvc)
	ldp	x0, x1, [sp]		// Don't fixup the stack yet

	/* No stub for you, sonny Jim */
alternative_if ARM64_KVM_PROTECTED_MODE
	b	__host_exit
alternative_else_nop_endif

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	__host_exit

	add	sp, sp, #16
	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there.
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	adr_l	x5, __kvm_handle_stub_hvc
	hyp_pa	x5, x6
	br	x5
SYM_FUNC_END(__host_hvc)

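/*
 * Synchronous exception vector for the host: stash x0/x1, then dispatch on
 * the exception class in ESR_EL2 - HVC64 goes to __host_hvc, everything else
 * to __host_exit. The .error guard below makes sure the expansion still fits
 * in a single 0x80-byte vector slot.
 */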
.macro host_el1_sync_vect
	.align 7
.L__vect_start\@:
	stp	x0, x1, [sp, #-16]!
	mrs	x0, esr_el2
	ubfx	x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH
	cmp	x0, #ESR_ELx_EC_HVC64
	b.eq	__host_hvc
	b	__host_exit
.L__vect_end\@:
.if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
	.error "host_el1_sync_vect larger than vector entry"
.endif
.endm

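/*
 * Exceptions taken from EL2 itself are never expected while the host is
 * running. Before panicking, check whether the hyp stack has overflowed so
 * that case can be reported from the dedicated overflow stack.
 */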
.macro invalid_host_el2_vect
	.align 7

	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * nVHE hypervisor stacks are aligned so that the NVHE_STACK_SHIFT bit
	 * of SP should always be 1.
	 */
	add	sp, sp, x0			// sp' = sp + x0
	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
	tbz	x0, #NVHE_STACK_SHIFT, .L__hyp_sp_overflow\@
	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp

	/*
	 * The panic may not be clean if the exception is taken before the host
	 * context has been saved by __host_exit or after the hyp context has
	 * been partially clobbered by __host_enter.
	 */
	b	hyp_panic

.L__hyp_sp_overflow\@:
	/* Switch to the overflow stack */
	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0

	b	hyp_panic_bad_stack
	ASM_BUG()
.endm

.macro invalid_host_el1_vect
	.align 7
	mov	x0, xzr		/* restore_host = false */
	mrs	x1, spsr_el2
	mrs	x2, elr_el2
	mrs	x3, par_el1
	b	__hyp_do_panic
.endm

/*
 * The host vector does not use an ESB instruction in order to avoid consuming
 * SErrors that should only be consumed by the host. Guest entry is deferred by
 * __guest_enter if there are any pending asynchronous exceptions, so hyp will
 * always return to the host without having consumed host SErrors.
 *
 * CONFIG_KVM_INDIRECT_VECTORS is not applied to the host vectors because the
 * host knows about the EL2 vectors already, and there is no point in hiding
 * them.
 */
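/*
 * 2KB-aligned (.align 11) table of sixteen 0x80-byte vector slots, in the
 * architectural order: current EL with SP_EL0 (EL2t), current EL with SP_ELx
 * (EL2h), lower EL using AArch64, lower EL using AArch32.
 */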
	.align 11
SYM_CODE_START(__kvm_hyp_host_vector)
	invalid_host_el2_vect			// Synchronous EL2t
	invalid_host_el2_vect			// IRQ EL2t
	invalid_host_el2_vect			// FIQ EL2t
	invalid_host_el2_vect			// Error EL2t

	invalid_host_el2_vect			// Synchronous EL2h
	invalid_host_el2_vect			// IRQ EL2h
	invalid_host_el2_vect			// FIQ EL2h
	invalid_host_el2_vect			// Error EL2h

	host_el1_sync_vect			// Synchronous 64-bit EL1/EL0
	invalid_host_el1_vect			// IRQ 64-bit EL1/EL0
	invalid_host_el1_vect			// FIQ 64-bit EL1/EL0
	invalid_host_el1_vect			// Error 64-bit EL1/EL0

	host_el1_sync_vect			// Synchronous 32-bit EL1/EL0
	invalid_host_el1_vect			// IRQ 32-bit EL1/EL0
	invalid_host_el1_vect			// FIQ 32-bit EL1/EL0
	invalid_host_el1_vect			// Error 32-bit EL1/EL0
SYM_CODE_END(__kvm_hyp_host_vector)

/*
 * Forward SMC with arguments in struct kvm_cpu_context, and
 * store the result into the same struct. Assumes SMCCC 1.2 or older.
 *
 * x0: struct kvm_cpu_context*
 */
SYM_CODE_START(__kvm_hyp_host_forward_smc)
	/*
	 * Use x18 to keep the pointer to the host context because
	 * x18 is callee-saved in SMCCC but not in AAPCS64.
	 */
	mov	x18, x0

	ldp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]

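	/*
	 * x0-x17 cover the full SMCCC 1.2 argument and result set, so loading
	 * and storing them around the SMC is sufficient for the forwarding
	 * described above.
	 */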
	smc	#0

	stp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
	stp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
	stp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]

	ret
SYM_CODE_END(__kvm_hyp_host_forward_smc)

/*
 * kvm_host_psci_cpu_entry is called through a br instruction, which requires
 * a bti j instruction, as compilers (gcc and llvm) don't insert bti j for
 * external functions, but bti c instead.
 */
SYM_CODE_START(kvm_host_psci_cpu_entry)
	bti j
	b	__kvm_host_psci_cpu_entry
SYM_CODE_END(kvm_host_psci_cpu_entry)