GitHub Repository: torvalds/linux
Path: arch/arm64/kvm/hyp/exception.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fault injection for both 32 and 64bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <hyp/adjust_pc.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>

#if !defined (__KVM_NVHE_HYPERVISOR__) && !defined (__KVM_VHE_HYPERVISOR__)
#error Hypervisor code only!
#endif

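/*
 * With VHE, the guest's EL1 system registers may still be resident on
 * the CPU while this code runs, so accesses must go through
 * vcpu_{read,write}_sys_reg(), which handle the loaded-on-CPU case.
 * The nVHE hypervisor always operates on the in-memory copy held in
 * the vcpu context.
 */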
static inline u64 __vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
	if (has_vhe())
		return vcpu_read_sys_reg(vcpu, reg);

	return __vcpu_sys_reg(vcpu, reg);
}

static inline void __vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	if (has_vhe())
		vcpu_write_sys_reg(vcpu, val, reg);
	else
		__vcpu_assign_sys_reg(vcpu, reg, val);
}

static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long target_mode,
			      u64 val)
{
	if (has_vhe()) {
		if (target_mode == PSR_MODE_EL1h)
			vcpu_write_sys_reg(vcpu, val, SPSR_EL1);
		else
			vcpu_write_sys_reg(vcpu, val, SPSR_EL2);
	} else {
		__vcpu_assign_sys_reg(vcpu, SPSR_EL1, val);
	}
}

static void __vcpu_write_spsr_abt(struct kvm_vcpu *vcpu, u64 val)
{
	if (has_vhe() && vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
		write_sysreg(val, spsr_abt);
	else
		vcpu->arch.ctxt.spsr_abt = val;
}

static void __vcpu_write_spsr_und(struct kvm_vcpu *vcpu, u64 val)
{
	if (has_vhe() && vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
		write_sysreg(val, spsr_und);
	else
		vcpu->arch.ctxt.spsr_und = val;
}

/*
 * This performs the exception entry at a given EL (@target_mode), stashing PC
 * and PSTATE into ELR and SPSR respectively, and computing the new PC/PSTATE.
 * The EL passed to this function *must* be a non-secure, privileged mode with
 * bit 0 being set (PSTATE.SP == 1).
 *
 * When an exception is taken, most PSTATE fields are left unchanged in the
 * handler. However, some are explicitly overridden (e.g. M[4:0]). Luckily all
 * of the inherited bits have the same position in the AArch64/AArch32 SPSR_ELx
 * layouts, so we don't need to shuffle these for exceptions from AArch32 EL0.
 *
 * For the SPSR_ELx layout for AArch64, see ARM DDI 0487E.a page C5-429.
 * For the SPSR_ELx layout for AArch32, see ARM DDI 0487E.a page C5-426.
 *
 * Here we manipulate the fields in order of the AArch64 SPSR_ELx layout, from
 * MSB to LSB.
 */
static void enter_exception64(struct kvm_vcpu *vcpu, unsigned long target_mode,
			      enum exception_type type)
{
	unsigned long sctlr, vbar, old, new, mode;
	u64 exc_offset;

	mode = *vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT);

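	/*
	 * Pick the vector table entry group: same EL using SP_ELx, same
	 * EL using SP_EL0, lower EL in AArch64, or lower EL in AArch32,
	 * matching the four groups of the AArch64 vector table layout.
	 */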
	if (mode == target_mode)
		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
	else if ((mode | PSR_MODE_THREAD_BIT) == target_mode)
		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
	else if (!(mode & PSR_MODE32_BIT))
		exc_offset = LOWER_EL_AArch64_VECTOR;
	else
		exc_offset = LOWER_EL_AArch32_VECTOR;

	switch (target_mode) {
	case PSR_MODE_EL1h:
		vbar = __vcpu_read_sys_reg(vcpu, VBAR_EL1);
		sctlr = __vcpu_read_sys_reg(vcpu, SCTLR_EL1);
		__vcpu_write_sys_reg(vcpu, *vcpu_pc(vcpu), ELR_EL1);
		break;
	case PSR_MODE_EL2h:
		vbar = __vcpu_read_sys_reg(vcpu, VBAR_EL2);
		sctlr = __vcpu_read_sys_reg(vcpu, SCTLR_EL2);
		__vcpu_write_sys_reg(vcpu, *vcpu_pc(vcpu), ELR_EL2);
		break;
	default:
		/* Don't do that */
		BUG();
	}

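	/*
	 * 'type' is the byte offset of the sync/IRQ/FIQ/SError entry
	 * within the 0x200-byte vector group selected above (0x0, 0x80,
	 * 0x100 and 0x180 respectively).
	 */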
	*vcpu_pc(vcpu) = vbar + exc_offset + type;

	old = *vcpu_cpsr(vcpu);
	new = 0;

	new |= (old & PSR_N_BIT);
	new |= (old & PSR_Z_BIT);
	new |= (old & PSR_C_BIT);
	new |= (old & PSR_V_BIT);

	if (kvm_has_mte(kern_hyp_va(vcpu->kvm)))
		new |= PSR_TCO_BIT;

	new |= (old & PSR_DIT_BIT);

	// PSTATE.UAO is set to zero upon any exception to AArch64
	// See ARM DDI 0487E.a, page D5-2579.

	// PSTATE.PAN is unchanged unless SCTLR_ELx.SPAN == 0b0
	// SCTLR_ELx.SPAN is RES1 when ARMv8.1-PAN is not implemented
	// See ARM DDI 0487E.a, page D5-2578.
	new |= (old & PSR_PAN_BIT);
	if (!(sctlr & SCTLR_EL1_SPAN))
		new |= PSR_PAN_BIT;

	// PSTATE.SS is set to zero upon any exception to AArch64
	// See ARM DDI 0487E.a, page D2-2452.

	// PSTATE.IL is set to zero upon any exception to AArch64
	// See ARM DDI 0487E.a, page D1-2306.

	// PSTATE.SSBS is set to SCTLR_ELx.DSSBS upon any exception to AArch64
	// See ARM DDI 0487E.a, page D13-3258
	if (sctlr & SCTLR_ELx_DSSBS)
		new |= PSR_SSBS_BIT;

	// PSTATE.BTYPE is set to zero upon any exception to AArch64
	// See ARM DDI 0487E.a, pages D1-2293 to D1-2294.

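	// The debug, SError, IRQ and FIQ masks (PSTATE.{D,A,I,F}) are all
	// set upon any exception to AArch64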
	new |= PSR_D_BIT;
	new |= PSR_A_BIT;
	new |= PSR_I_BIT;
	new |= PSR_F_BIT;

	new |= target_mode;

	*vcpu_cpsr(vcpu) = new;
	__vcpu_write_spsr(vcpu, target_mode, old);
}

/*
 * When an exception is taken, most CPSR fields are left unchanged in the
 * handler. However, some are explicitly overridden (e.g. M[4:0]).
 *
 * The SPSR/SPSR_ELx layouts differ, and the below is intended to work with
 * either format. Note: SPSR.J bit doesn't exist in SPSR_ELx, but this bit was
 * obsoleted by the ARMv7 virtualization extensions and is RES0.
 *
 * For the SPSR layout seen from AArch32, see:
 * - ARM DDI 0406C.d, page B1-1148
 * - ARM DDI 0487E.a, page G8-6264
 *
 * For the SPSR_ELx layout for AArch32 seen from AArch64, see:
 * - ARM DDI 0487E.a, page C5-426
 *
 * Here we manipulate the fields in order of the AArch32 SPSR_ELx layout, from
 * MSB to LSB.
 */
static unsigned long get_except32_cpsr(struct kvm_vcpu *vcpu, u32 mode)
{
	u32 sctlr = __vcpu_read_sys_reg(vcpu, SCTLR_EL1);
	unsigned long old, new;

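	/*
	 * The raw BIT() tests below are against the AArch32 SCTLR layout:
	 * DSSBS is bit 31, TE bit 30, EE bit 25 and SPAN bit 23.
	 */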
	old = *vcpu_cpsr(vcpu);
	new = 0;

	new |= (old & PSR_AA32_N_BIT);
	new |= (old & PSR_AA32_Z_BIT);
	new |= (old & PSR_AA32_C_BIT);
	new |= (old & PSR_AA32_V_BIT);
	new |= (old & PSR_AA32_Q_BIT);

	// CPSR.IT[7:0] are set to zero upon any exception
	// See ARM DDI 0487E.a, section G1.12.3
	// See ARM DDI 0406C.d, section B1.8.3

	new |= (old & PSR_AA32_DIT_BIT);

	// CPSR.SSBS is set to SCTLR.DSSBS upon any exception
	// See ARM DDI 0487E.a, page G8-6244
	if (sctlr & BIT(31))
		new |= PSR_AA32_SSBS_BIT;

	// CPSR.PAN is unchanged unless SCTLR.SPAN == 0b0
	// SCTLR.SPAN is RES1 when ARMv8.1-PAN is not implemented
	// See ARM DDI 0487E.a, page G8-6246
	new |= (old & PSR_AA32_PAN_BIT);
	if (!(sctlr & BIT(23)))
		new |= PSR_AA32_PAN_BIT;

	// SS does not exist in AArch32, so ignore

	// CPSR.IL is set to zero upon any exception
	// See ARM DDI 0487E.a, page G1-5527

	new |= (old & PSR_AA32_GE_MASK);

	// CPSR.IT[7:0] are set to zero upon any exception
	// See prior comment above

	// CPSR.E is set to SCTLR.EE upon any exception
	// See ARM DDI 0487E.a, page G8-6245
	// See ARM DDI 0406C.d, page B4-1701
	if (sctlr & BIT(25))
		new |= PSR_AA32_E_BIT;

	// CPSR.A is unchanged upon an exception to Undefined, Supervisor
	// CPSR.A is set upon an exception to other modes
	// See ARM DDI 0487E.a, pages G1-5515 to G1-5516
	// See ARM DDI 0406C.d, page B1-1182
	new |= (old & PSR_AA32_A_BIT);
	if (mode != PSR_AA32_MODE_UND && mode != PSR_AA32_MODE_SVC)
		new |= PSR_AA32_A_BIT;

	// CPSR.I is set upon any exception
	// See ARM DDI 0487E.a, pages G1-5515 to G1-5516
	// See ARM DDI 0406C.d, page B1-1182
	new |= PSR_AA32_I_BIT;

	// CPSR.F is set upon an exception to FIQ
	// CPSR.F is unchanged upon an exception to other modes
	// See ARM DDI 0487E.a, pages G1-5515 to G1-5516
	// See ARM DDI 0406C.d, page B1-1182
	new |= (old & PSR_AA32_F_BIT);
	if (mode == PSR_AA32_MODE_FIQ)
		new |= PSR_AA32_F_BIT;

	// CPSR.T is set to SCTLR.TE upon any exception
	// See ARM DDI 0487E.a, page G8-5514
	// See ARM DDI 0406C.d, page B1-1181
	if (sctlr & BIT(30))
		new |= PSR_AA32_T_BIT;

	new |= mode;

	return new;
}

/*
 * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
 */
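/*
 * Rows are indexed by the AArch32 vector offset divided by four; column
 * 0 holds the offset added to the faulting PC to form the banked LR
 * when the exception is taken from ARM state, column 1 when taken from
 * Thumb state.
 */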
static const u8 return_offsets[8][2] = {
	[0] = { 0, 0 },		/* Reset, unused */
	[1] = { 4, 2 },		/* Undefined */
	[2] = { 0, 0 },		/* SVC, unused */
	[3] = { 4, 4 },		/* Prefetch abort */
	[4] = { 8, 8 },		/* Data abort */
	[5] = { 0, 0 },		/* HVC, unused */
	[6] = { 4, 4 },		/* IRQ, unused */
	[7] = { 4, 4 },		/* FIQ, unused */
};

static void enter_exception32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
{
	unsigned long spsr = *vcpu_cpsr(vcpu);
	bool is_thumb = (spsr & PSR_AA32_T_BIT);
	u32 sctlr = __vcpu_read_sys_reg(vcpu, SCTLR_EL1);
	u32 return_address;

	*vcpu_cpsr(vcpu) = get_except32_cpsr(vcpu, mode);
	return_address = *vcpu_pc(vcpu);
	return_address += return_offsets[vect_offset >> 2][is_thumb];

	/* KVM only enters the ABT and UND modes, so only deal with those */
	switch (mode) {
	case PSR_AA32_MODE_ABT:
		__vcpu_write_spsr_abt(vcpu, host_spsr_to_spsr32(spsr));
		vcpu_gp_regs(vcpu)->compat_lr_abt = return_address;
		break;

	case PSR_AA32_MODE_UND:
		__vcpu_write_spsr_und(vcpu, host_spsr_to_spsr32(spsr));
		vcpu_gp_regs(vcpu)->compat_lr_und = return_address;
		break;
	}

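	/*
	 * SCTLR.V (bit 13) selects the legacy "Hivecs" layout: when set,
	 * the AArch32 vectors sit at the fixed address 0xffff0000 instead
	 * of being based at VBAR.
	 */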
	/* Branch to exception vector */
	if (sctlr & (1 << 13))
		vect_offset += 0xffff0000;
	else /* always have security exceptions */
		vect_offset += __vcpu_read_sys_reg(vcpu, VBAR_EL1);

	*vcpu_pc(vcpu) = vect_offset;
}

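/*
 * The exception to inject is described by the vcpu's EXCEPT_MASK flag
 * bits, set by the injection code before entering the hypervisor;
 * decode them here and perform the entry into the guest's vectors.
 */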
static void kvm_inject_exception(struct kvm_vcpu *vcpu)
{
	if (vcpu_el1_is_32bit(vcpu)) {
		switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) {
		case unpack_vcpu_flag(EXCEPT_AA32_UND):
			enter_exception32(vcpu, PSR_AA32_MODE_UND, 4);
			break;
		case unpack_vcpu_flag(EXCEPT_AA32_IABT):
			enter_exception32(vcpu, PSR_AA32_MODE_ABT, 12);
			break;
		case unpack_vcpu_flag(EXCEPT_AA32_DABT):
			enter_exception32(vcpu, PSR_AA32_MODE_ABT, 16);
			break;
		default:
			/* Unexpected flag value: nothing to inject */
			break;
		}
	} else {
		switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) {
		case unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC):
			enter_exception64(vcpu, PSR_MODE_EL1h, except_type_sync);
			break;

		case unpack_vcpu_flag(EXCEPT_AA64_EL1_SERR):
			enter_exception64(vcpu, PSR_MODE_EL1h, except_type_serror);
			break;

		case unpack_vcpu_flag(EXCEPT_AA64_EL2_SYNC):
			enter_exception64(vcpu, PSR_MODE_EL2h, except_type_sync);
			break;

		case unpack_vcpu_flag(EXCEPT_AA64_EL2_IRQ):
			enter_exception64(vcpu, PSR_MODE_EL2h, except_type_irq);
			break;

		case unpack_vcpu_flag(EXCEPT_AA64_EL2_SERR):
			enter_exception64(vcpu, PSR_MODE_EL2h, except_type_serror);
			break;

		default:
			/*
			 * Only EL1_{SYNC,SERR} and EL2_{SYNC,IRQ,SERR} make
			 * sense so far. Everything else gets silently
			 * ignored.
			 */
			break;
		}
	}
}

/*
 * Adjust the guest PC (and potentially exception state) depending on
 * flags provided by the emulation code.
 */
void __kvm_adjust_pc(struct kvm_vcpu *vcpu)
{
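	/*
	 * PENDING_EXCEPTION and INCREMENT_PC are mutually exclusive: a
	 * single exit cannot both inject an exception and skip the
	 * trapped instruction.
	 */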
	if (vcpu_get_flag(vcpu, PENDING_EXCEPTION)) {
		kvm_inject_exception(vcpu);
		vcpu_clear_flag(vcpu, PENDING_EXCEPTION);
		vcpu_clear_flag(vcpu, EXCEPT_MASK);
	} else if (vcpu_get_flag(vcpu, INCREMENT_PC)) {
		kvm_skip_instr(vcpu);
		vcpu_clear_flag(vcpu, INCREMENT_PC);
	}
}