// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fault injection for both 32 and 64bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <[email protected]>
 *
 * Based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <[email protected]>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_nested.h>
#include <asm/esr.h>

static unsigned int exception_target_el(struct kvm_vcpu *vcpu)
{
        /* If not nesting, EL1 is the only possible exception target */
        if (likely(!vcpu_has_nv(vcpu)))
                return PSR_MODE_EL1h;

        /*
         * With NV, we need to pick between EL1 and EL2. Note that we
         * never deal with a nesting exception here, hence never
         * changing context, and the exception itself can be delayed
         * until the next entry.
         */
        switch (*vcpu_cpsr(vcpu) & PSR_MODE_MASK) {
        case PSR_MODE_EL2h:
        case PSR_MODE_EL2t:
                return PSR_MODE_EL2h;
        case PSR_MODE_EL1h:
        case PSR_MODE_EL1t:
                return PSR_MODE_EL1h;
        case PSR_MODE_EL0t:
                return vcpu_el2_tge_is_set(vcpu) ? PSR_MODE_EL2h : PSR_MODE_EL1h;
        default:
                BUG();
        }
}

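/*
 * Select the banked ESR/FAR register (EL1 or EL2) that must describe the
 * injected exception, based on the target exception level computed above.
 */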
static enum vcpu_sysreg exception_esr_elx(struct kvm_vcpu *vcpu)
{
        if (exception_target_el(vcpu) == PSR_MODE_EL2h)
                return ESR_EL2;

        return ESR_EL1;
}

static enum vcpu_sysreg exception_far_elx(struct kvm_vcpu *vcpu)
{
        if (exception_target_el(vcpu) == PSR_MODE_EL2h)
                return FAR_EL2;

        return FAR_EL1;
}

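/*
 * Mark a synchronous exception or an SError as pending at the target EL;
 * the actual exception entry is emulated on the next guest entry.
 */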
static void pend_sync_exception(struct kvm_vcpu *vcpu)
{
        if (exception_target_el(vcpu) == PSR_MODE_EL1h)
                kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
        else
                kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SYNC);
}

static void pend_serror_exception(struct kvm_vcpu *vcpu)
{
        if (exception_target_el(vcpu) == PSR_MODE_EL1h)
                kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SERR);
        else
                kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SERR);
}

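/*
 * Evaluate an SCTLR2_ELx control bit as seen from the target EL, taking
 * FEAT_SCTLR2 availability and, for nested guests, HCRX_EL2.SCTLR2En into
 * account.
 */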
static bool __effective_sctlr2_bit(struct kvm_vcpu *vcpu, unsigned int idx)
{
        u64 sctlr2;

        if (!kvm_has_sctlr2(vcpu->kvm))
                return false;

        if (is_nested_ctxt(vcpu) &&
            !(__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_SCTLR2En))
                return false;

        if (exception_target_el(vcpu) == PSR_MODE_EL1h)
                sctlr2 = vcpu_read_sys_reg(vcpu, SCTLR2_EL1);
        else
                sctlr2 = vcpu_read_sys_reg(vcpu, SCTLR2_EL2);

        return sctlr2 & BIT(idx);
}

static bool effective_sctlr2_ease(struct kvm_vcpu *vcpu)
{
        return __effective_sctlr2_bit(vcpu, SCTLR2_EL1_EASE_SHIFT);
}

static bool effective_sctlr2_nmea(struct kvm_vcpu *vcpu)
{
        return __effective_sctlr2_bit(vcpu, SCTLR2_EL1_NMEA_SHIFT);
}

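/*
 * Inject a synchronous external abort (instruction or data) into an AArch64
 * guest, filling in ESR_ELx and FAR_ELx at the target EL.
 */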
static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
{
        unsigned long cpsr = *vcpu_cpsr(vcpu);
        bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
        u64 esr = 0, fsc;
        int level;

        /*
         * If injecting an abort from a failed S1PTW, rewalk the S1 PTs to
         * find the failing level. If we can't find it, assume the error was
         * transient and restart without changing the state.
         */
        if (kvm_vcpu_abt_iss1tw(vcpu)) {
                u64 hpfar = kvm_vcpu_get_fault_ipa(vcpu);
                int ret;

                if (hpfar == INVALID_GPA)
                        return;

                ret = __kvm_find_s1_desc_level(vcpu, addr, hpfar, &level);
                if (ret)
                        return;

                WARN_ON_ONCE(level < -1 || level > 3);
                fsc = ESR_ELx_FSC_SEA_TTW(level);
        } else {
                fsc = ESR_ELx_FSC_EXTABT;
        }

        /* This delight is brought to you by FEAT_DoubleFault2. */
        if (effective_sctlr2_ease(vcpu))
                pend_serror_exception(vcpu);
        else
                pend_sync_exception(vcpu);

        /*
         * Build an {i,d}abort, depending on the level and the
         * instruction set. Report an external synchronous abort.
         */
        if (kvm_vcpu_trap_il_is32bit(vcpu))
                esr |= ESR_ELx_IL;

        /*
         * Here, the guest runs in AArch64 mode when in EL1. If we get
         * an AArch32 fault, it means we managed to trap an EL0 fault.
         */
        if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
                esr |= (ESR_ELx_EC_IABT_LOW << ESR_ELx_EC_SHIFT);
        else
                esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);

        if (!is_iabt)
                esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;

        esr |= fsc;

        vcpu_write_sys_reg(vcpu, addr, exception_far_elx(vcpu));
        vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
}

static void inject_undef64(struct kvm_vcpu *vcpu)
{
        u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);

        pend_sync_exception(vcpu);

        /*
         * Build an unknown exception, depending on the instruction
         * set.
         */
        if (kvm_vcpu_trap_il_is32bit(vcpu))
                esr |= ESR_ELx_IL;

        vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
}

#define DFSR_FSC_EXTABT_LPAE    0x10
#define DFSR_FSC_EXTABT_nLPAE   0x08
#define DFSR_LPAE               BIT(9)
#define TTBCR_EAE               BIT(31)

static void inject_undef32(struct kvm_vcpu *vcpu)
{
        kvm_pend_exception(vcpu, EXCEPT_AA32_UND);
}

/*
 * Modelled after TakeDataAbortException() and TakePrefetchAbortException
 * pseudocode.
 */
static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
{
        u64 far;
        u32 fsr;

        /* Give the guest an IMPLEMENTATION DEFINED exception */
        if (vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE) {
                fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
        } else {
                /* no need to shuffle FS[4] into DFSR[10] as it's 0 */
                fsr = DFSR_FSC_EXTABT_nLPAE;
        }

        far = vcpu_read_sys_reg(vcpu, FAR_EL1);

        if (is_pabt) {
                kvm_pend_exception(vcpu, EXCEPT_AA32_IABT);
                far &= GENMASK(31, 0);
                far |= (u64)addr << 32;
                vcpu_write_sys_reg(vcpu, fsr, IFSR32_EL2);
        } else { /* !iabt */
                kvm_pend_exception(vcpu, EXCEPT_AA32_DABT);
                far &= GENMASK(63, 32);
                far |= addr;
                vcpu_write_sys_reg(vcpu, fsr, ESR_EL1);
        }

        vcpu_write_sys_reg(vcpu, far, FAR_EL1);
}

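/*
 * Inject a synchronous external abort via the 32bit or 64bit path, depending
 * on the execution state of the guest's EL1.
 */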
static void __kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr)
{
        if (vcpu_el1_is_32bit(vcpu))
                inject_abt32(vcpu, iabt, addr);
        else
                inject_abt64(vcpu, iabt, addr);
}

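/*
 * An external abort is routed to (virtual) EL2 if HCR_EL2.{TGE,TEA} is set,
 * or if the vCPU runs in a privileged mode with PSTATE.A set while
 * HCRX_EL2.TMEA is set.
 */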
static bool kvm_sea_target_is_el2(struct kvm_vcpu *vcpu)
{
        if (__vcpu_sys_reg(vcpu, HCR_EL2) & (HCR_TGE | HCR_TEA))
                return true;

        if (!vcpu_mode_priv(vcpu))
                return false;

        return (*vcpu_cpsr(vcpu) & PSR_A_BIT) &&
               (__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_TMEA);
}

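/*
 * Inject a synchronous external abort, forwarding it to the guest hypervisor
 * whenever the nested configuration routes it to (virtual) EL2.
 */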
int kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr)
{
        lockdep_assert_held(&vcpu->mutex);

        if (is_nested_ctxt(vcpu) && kvm_sea_target_is_el2(vcpu))
                return kvm_inject_nested_sea(vcpu, iabt, addr);

        __kvm_inject_sea(vcpu, iabt, addr);
        return 1;
}

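/*
 * Report an unsupported exclusive or atomic access to the guest hypervisor
 * as a data abort taken to (virtual) EL2.
 */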
static int kvm_inject_nested_excl_atomic(struct kvm_vcpu *vcpu, u64 addr)
{
        u64 esr = FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_DABT_LOW) |
                  FIELD_PREP(ESR_ELx_FSC, ESR_ELx_FSC_EXCL_ATOMIC) |
                  ESR_ELx_IL;

        vcpu_write_sys_reg(vcpu, addr, FAR_EL2);
        return kvm_inject_nested_sync(vcpu, esr);
}

/**
 * kvm_inject_dabt_excl_atomic - inject a data abort for unsupported exclusive
 *                               or atomic access
 * @vcpu: The VCPU to receive the data abort
 * @addr: The address to report in the DFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
int kvm_inject_dabt_excl_atomic(struct kvm_vcpu *vcpu, u64 addr)
{
        u64 esr;

        if (is_nested_ctxt(vcpu) && (vcpu_read_sys_reg(vcpu, HCR_EL2) & HCR_VM))
                return kvm_inject_nested_excl_atomic(vcpu, addr);

        __kvm_inject_sea(vcpu, false, addr);
        esr = vcpu_read_sys_reg(vcpu, exception_esr_elx(vcpu));
        esr &= ~ESR_ELx_FSC;
        esr |= ESR_ELx_FSC_EXCL_ATOMIC;
        vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
        return 1;
}

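/*
 * Report an access outside the guest's IPA space as an external abort. The
 * FSC is rewritten to an Address Size Fault where the guest can make sense
 * of it (AArch64 or LPAE).
 */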
void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
{
        unsigned long addr, esr;

        addr = kvm_vcpu_get_fault_ipa(vcpu);
        addr |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);

        __kvm_inject_sea(vcpu, kvm_vcpu_trap_is_iabt(vcpu), addr);

        /*
         * If AArch64 or LPAE, set FSC to 0 to indicate an Address
         * Size Fault at level 0, as if exceeding PARange.
         *
         * Non-LPAE guests will only get the external abort, as there
         * is no way to describe the ASF.
         */
        if (vcpu_el1_is_32bit(vcpu) &&
            !(vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE))
                return;

        esr = vcpu_read_sys_reg(vcpu, exception_esr_elx(vcpu));
        esr &= ~GENMASK_ULL(5, 0);
        vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
}

/**
 * kvm_inject_undefined - inject an undefined instruction into the guest
 * @vcpu: The vCPU in which to inject the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
        if (vcpu_el1_is_32bit(vcpu))
                inject_undef32(vcpu);
        else
                inject_undef64(vcpu);
}

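/*
 * An SError is masked if PSTATE.A is set and the effective value of
 * SCTLR2_ELx.NMEA hasn't made SErrors non-maskable.
 */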
static bool serror_is_masked(struct kvm_vcpu *vcpu)
{
        return (*vcpu_cpsr(vcpu) & PSR_A_BIT) && !effective_sctlr2_nmea(vcpu);
}

static bool kvm_serror_target_is_el2(struct kvm_vcpu *vcpu)
{
        if (is_hyp_ctxt(vcpu) || vcpu_el2_amo_is_set(vcpu))
                return true;

        if (!(__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_TMEA))
                return false;

        /*
         * In another example where FEAT_DoubleFault2 is entirely backwards,
         * "masked" as it relates to the routing effects of HCRX_EL2.TMEA
         * doesn't consider SCTLR2_EL1.NMEA. That is to say, even if EL1 asked
         * for non-maskable SErrors, the EL2 bit takes priority if A is set.
         */
        if (vcpu_mode_priv(vcpu))
                return *vcpu_cpsr(vcpu) & PSR_A_BIT;

        /*
         * Otherwise SErrors are considered unmasked when taken from EL0 and
         * NMEA is set.
         */
        return serror_is_masked(vcpu);
}

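/*
 * With the vCPU at (virtual) EL2, a physical SError can only be delivered if
 * either HCR_EL2.TGE or HCR_EL2.AMO is set; otherwise it has to stay pending.
 */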
static bool kvm_serror_undeliverable_at_el2(struct kvm_vcpu *vcpu)
{
        return !(vcpu_el2_tge_is_set(vcpu) || vcpu_el2_amo_is_set(vcpu));
}

int kvm_inject_serror_esr(struct kvm_vcpu *vcpu, u64 esr)
365
{
366
lockdep_assert_held(&vcpu->mutex);
367
368
if (is_nested_ctxt(vcpu) && kvm_serror_target_is_el2(vcpu))
369
return kvm_inject_nested_serror(vcpu, esr);
370
371
if (vcpu_is_el2(vcpu) && kvm_serror_undeliverable_at_el2(vcpu)) {
372
vcpu_set_vsesr(vcpu, esr);
373
vcpu_set_flag(vcpu, NESTED_SERROR_PENDING);
374
return 1;
375
}
376
377
/*
378
* Emulate the exception entry if SErrors are unmasked. This is useful if
379
* the vCPU is in a nested context w/ vSErrors enabled then we've already
380
* delegated he hardware vSError context (i.e. HCR_EL2.VSE, VSESR_EL2,
381
* VDISR_EL2) to the guest hypervisor.
382
*
383
* As we're emulating the SError injection we need to explicitly populate
384
* ESR_ELx.EC because hardware will not do it on our behalf.
385
*/
386
if (!serror_is_masked(vcpu)) {
387
pend_serror_exception(vcpu);
388
esr |= FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_SERROR);
389
vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
390
return 1;
391
}
392
393
vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
394
*vcpu_hcr(vcpu) |= HCR_VSE;
395
return 1;
396
}
397
398