GitHub Repository: torvalds/linux
Path: blob/master/tools/testing/selftests/kvm/arm64/external_aborts.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * external_abort - Tests for userspace external abort injection
 *
 * Copyright (c) 2024 Google LLC
 */
#include "processor.h"
#include "test_util.h"

#define MMIO_ADDR		0x8000000ULL
#define EXPECTED_SERROR_ISS	(ESR_ELx_ISV | 0x1d1ed)
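
/*
 * Note: for SError exceptions, ISS bit 24 is IDS (implementation-defined
 * syndrome); ESR_ELx_ISV shares that bit position, so the rest of the payload
 * is implementation defined and 0x1d1ed is presumably just an arbitrary,
 * recognizable test value. The tests below only check that it round-trips
 * into the guest's ESR_EL1 when FEAT_RAS is present.
 */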

static u64 expected_abort_pc;

static void expect_sea_handler(struct ex_regs *regs)
{
	u64 esr = read_sysreg(esr_el1);

	GUEST_ASSERT_EQ(regs->pc, expected_abort_pc);
	GUEST_ASSERT_EQ(ESR_ELx_EC(esr), ESR_ELx_EC_DABT_CUR);
	GUEST_ASSERT_EQ(esr & ESR_ELx_FSC_TYPE, ESR_ELx_FSC_EXTABT);

	GUEST_DONE();
}

static void unexpected_dabt_handler(struct ex_regs *regs)
{
	GUEST_FAIL("Unexpected data abort at PC: %lx\n", regs->pc);
}
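
/*
 * Common setup: create a VM with one vCPU, route data aborts taken from the
 * guest's current EL to @dabt_handler, and identity-map MMIO_ADDR at stage-1.
 * No memslot backs MMIO_ADDR, so guest accesses to it are reported to
 * userspace as MMIO exits.
 */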
static struct kvm_vm *vm_create_with_dabt_handler(struct kvm_vcpu **vcpu, void *guest_code,
						  handler_fn dabt_handler)
{
	struct kvm_vm *vm = vm_create_with_one_vcpu(vcpu, guest_code);

	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(*vcpu);
	vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT, ESR_ELx_EC_DABT_CUR, dabt_handler);

	virt_map(vm, MMIO_ADDR, MMIO_ADDR, 1);

	return vm;
}
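
/*
 * vcpu_events_set() is the selftest wrapper around the KVM_SET_VCPU_EVENTS
 * ioctl; setting ext_dabt_pending asks KVM to inject a synchronous external
 * data abort into the guest on the next KVM_RUN.
 */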
static void vcpu_inject_sea(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_events events = {};

	events.exception.ext_dabt_pending = true;
	vcpu_events_set(vcpu, &events);
}

static bool vcpu_has_ras(struct kvm_vcpu *vcpu)
{
	u64 pfr0 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));

	return SYS_FIELD_GET(ID_AA64PFR0_EL1, RAS, pfr0);
}

static bool guest_has_ras(void)
{
	return SYS_FIELD_GET(ID_AA64PFR0_EL1, RAS, read_sysreg(id_aa64pfr0_el1));
}
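
/*
 * Make an SError pending. A specific syndrome (serror_has_esr/serror_esr) may
 * only be supplied when the vCPU has FEAT_RAS; without RAS the SError ESR is
 * not under userspace control, so only the bare pending bit is set.
 */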
static void vcpu_inject_serror(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_events events = {};

	events.exception.serror_pending = true;
	if (vcpu_has_ras(vcpu)) {
		events.exception.serror_has_esr = true;
		events.exception.serror_esr = EXPECTED_SERROR_ISS;
	}

	vcpu_events_set(vcpu, &events);
}
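
/*
 * Run the vCPU and decode the guest's ucalls: GUEST_ASSERT failures are
 * reported immediately, the expected ucall (UCALL_DONE or UCALL_SYNC below)
 * returns to the caller, and anything else fails the test.
 */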
static void __vcpu_run_expect(struct kvm_vcpu *vcpu, unsigned int cmd)
{
	struct ucall uc;

	vcpu_run(vcpu);
	switch (get_ucall(vcpu, &uc)) {
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
		break;
	default:
		if (uc.cmd == cmd)
			return;

		TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
	}
}

static void vcpu_run_expect_done(struct kvm_vcpu *vcpu)
{
	__vcpu_run_expect(vcpu, UCALL_DONE);
}

static void vcpu_run_expect_sync(struct kvm_vcpu *vcpu)
{
	__vcpu_run_expect(vcpu, UCALL_SYNC);
}
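
/*
 * The faulting instruction carries an asm label so the guest can publish the
 * exact PC the abort handler should observe; noinline keeps the compiler from
 * duplicating the label (and the function body) into multiple call sites.
 */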
extern char test_mmio_abort_insn;

static noinline void test_mmio_abort_guest(void)
{
	WRITE_ONCE(expected_abort_pc, (u64)&test_mmio_abort_insn);

	asm volatile("test_mmio_abort_insn:\n\t"
		     "ldr x0, [%0]\n\t"
		     : : "r" (MMIO_ADDR) : "x0", "memory");

	GUEST_FAIL("MMIO instruction should not retire");
}

/*
 * Test that KVM doesn't complete MMIO emulation when userspace has made an
 * external abort pending for the instruction.
 */
static void test_mmio_abort(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_mmio_abort_guest,
							expect_sea_handler);
	struct kvm_run *run = vcpu->run;

	vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_MMIO);
	TEST_ASSERT_EQ(run->mmio.phys_addr, MMIO_ADDR);
	TEST_ASSERT_EQ(run->mmio.len, sizeof(unsigned long));
	TEST_ASSERT(!run->mmio.is_write, "Expected MMIO read");

	vcpu_inject_sea(vcpu);
	vcpu_run_expect_done(vcpu);
	kvm_vm_free(vm);
}
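
/*
 * The post-indexed addressing mode below leaves ESR_EL2.ISV=0: the syndrome
 * doesn't describe the access well enough for KVM to emulate it, so the abort
 * can only be forwarded to userspace (KVM_EXIT_ARM_NISV) or fail KVM_RUN
 * outright.
 */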
extern char test_mmio_nisv_insn;

static void test_mmio_nisv_guest(void)
{
	WRITE_ONCE(expected_abort_pc, (u64)&test_mmio_nisv_insn);

	asm volatile("test_mmio_nisv_insn:\n\t"
		     "ldr x0, [%0], #8\n\t"
		     : : "r" (MMIO_ADDR) : "x0", "memory");

	GUEST_FAIL("MMIO instruction should not retire");
}

/*
 * Test that the KVM_RUN ioctl fails for ESR_EL2.ISV=0 MMIO aborts if userspace
 * hasn't enabled KVM_CAP_ARM_NISV_TO_USER.
 */
static void test_mmio_nisv(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_mmio_nisv_guest,
							unexpected_dabt_handler);

	TEST_ASSERT(_vcpu_run(vcpu), "Expected nonzero return code from KVM_RUN");
	TEST_ASSERT_EQ(errno, ENOSYS);

	kvm_vm_free(vm);
}

/*
 * Test that ESR_EL2.ISV=0 MMIO aborts reach userspace and that an injected SEA
 * reaches the guest.
 */
static void test_mmio_nisv_abort(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_mmio_nisv_guest,
							expect_sea_handler);
	struct kvm_run *run = vcpu->run;

	vm_enable_cap(vm, KVM_CAP_ARM_NISV_TO_USER, 1);

	vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_ARM_NISV);
	TEST_ASSERT_EQ(run->arm_nisv.fault_ipa, MMIO_ADDR);

	vcpu_inject_sea(vcpu);
	vcpu_run_expect_done(vcpu);
	kvm_vm_free(vm);
}

static void unexpected_serror_handler(struct ex_regs *regs)
{
	GUEST_FAIL("Took unexpected SError exception");
}

static void test_serror_masked_guest(void)
{
	GUEST_ASSERT(read_sysreg(isr_el1) & ISR_EL1_A);

	isb();

	GUEST_DONE();
}
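
/*
 * The guest runs with SErrors masked (PSTATE.A set) and never unmasks them,
 * so the injected SError stays pending: it is visible in ISR_EL1.A but must
 * not be taken before the guest finishes.
 */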
static void test_serror_masked(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_serror_masked_guest,
							unexpected_dabt_handler);

	vm_install_exception_handler(vm, VECTOR_ERROR_CURRENT, unexpected_serror_handler);

	vcpu_inject_serror(vcpu);
	vcpu_run_expect_done(vcpu);
	kvm_vm_free(vm);
}

static void expect_serror_handler(struct ex_regs *regs)
{
	u64 esr = read_sysreg(esr_el1);

	GUEST_ASSERT_EQ(ESR_ELx_EC(esr), ESR_ELx_EC_SERROR);
	if (guest_has_ras())
		GUEST_ASSERT_EQ(ESR_ELx_ISS(esr), EXPECTED_SERROR_ISS);

	GUEST_DONE();
}

static void test_serror_guest(void)
{
	GUEST_ASSERT(read_sysreg(isr_el1) & ISR_EL1_A);

	local_serror_enable();
	isb();
	local_serror_disable();

	GUEST_FAIL("Should've taken pending SError exception");
}

static void test_serror(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_serror_guest,
							unexpected_dabt_handler);

	vm_install_exception_handler(vm, VECTOR_ERROR_CURRENT, expect_serror_handler);

	vcpu_inject_serror(vcpu);
	vcpu_run_expect_done(vcpu);
	kvm_vm_free(vm);
}

static void expect_sea_s1ptw_handler(struct ex_regs *regs)
{
	u64 esr = read_sysreg(esr_el1);

	GUEST_ASSERT_EQ(regs->pc, expected_abort_pc);
	GUEST_ASSERT_EQ(ESR_ELx_EC(esr), ESR_ELx_EC_DABT_CUR);
	GUEST_ASSERT_EQ((esr & ESR_ELx_FSC), ESR_ELx_FSC_SEA_TTW(3));

	GUEST_DONE();
}

static noinline void test_s1ptw_abort_guest(void)
{
	extern char test_s1ptw_abort_insn;

	WRITE_ONCE(expected_abort_pc, (u64)&test_s1ptw_abort_insn);

	asm volatile("test_s1ptw_abort_insn:\n\t"
		     "ldr x0, [%0]\n\t"
		     : : "r" (MMIO_ADDR) : "x0", "memory");

	GUEST_FAIL("Load on S1PTW abort should not retire");
}
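
/*
 * Corrupt the stage-1 level-2 table entry for MMIO_ADDR so that it points at
 * the top of the guest's PA space, where no memslot exists. The level-3
 * descriptor fetch of the stage-1 walk then targets memory KVM cannot
 * emulate, and the guest sees an external abort on the walk
 * (ESR_ELx_FSC_SEA_TTW(3)).
 */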
static void test_s1ptw_abort(void)
{
	struct kvm_vcpu *vcpu;
	u64 *ptep, bad_pa;
	struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_s1ptw_abort_guest,
							expect_sea_s1ptw_handler);

	ptep = virt_get_pte_hva_at_level(vm, MMIO_ADDR, 2);
	bad_pa = BIT(vm->pa_bits) - vm->page_size;

	*ptep &= ~GENMASK(47, 12);
	*ptep |= bad_pa;

	vcpu_run_expect_done(vcpu);
	kvm_vm_free(vm);
}
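
/*
 * Unlike test_serror(), nothing is pending when the guest starts (ISR_EL1.A
 * reads 0). The guest unmasks SErrors and uses GUEST_SYNC() to give the host
 * a window in which to inject one; the SError is then taken as soon as the
 * guest resumes.
 */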
static void test_serror_emulated_guest(void)
{
	GUEST_ASSERT(!(read_sysreg(isr_el1) & ISR_EL1_A));

	local_serror_enable();
	GUEST_SYNC(0);
	local_serror_disable();

	GUEST_FAIL("Should've taken unmasked SError exception");
}

static void test_serror_emulated(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_serror_emulated_guest,
							unexpected_dabt_handler);

	vm_install_exception_handler(vm, VECTOR_ERROR_CURRENT, expect_serror_handler);

	vcpu_run_expect_sync(vcpu);
	vcpu_inject_serror(vcpu);
	vcpu_run_expect_done(vcpu);
	kvm_vm_free(vm);
}

static void test_mmio_ease_guest(void)
{
	sysreg_clear_set_s(SYS_SCTLR2_EL1, 0, SCTLR2_EL1_EASE);
	isb();

	test_mmio_abort_guest();
}

/*
 * Test that KVM doesn't complete MMIO emulation when userspace has made an
 * external abort pending for the instruction, and that with SCTLR2_EL1.EASE
 * set the SEA is taken at the SError vector.
 */
static void test_mmio_ease(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_mmio_ease_guest,
							unexpected_dabt_handler);
	struct kvm_run *run = vcpu->run;
	u64 pfr1;

	pfr1 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1));
	if (!SYS_FIELD_GET(ID_AA64PFR1_EL1, DF2, pfr1)) {
		pr_debug("Skipping %s\n", __func__);
		kvm_vm_free(vm);
		return;
	}

	/*
	 * SCTLR2_ELx.EASE changes the exception vector to the SError vector but
	 * doesn't further modify the exception context (e.g. ESR_ELx, FAR_ELx).
	 */
	vm_install_exception_handler(vm, VECTOR_ERROR_CURRENT, expect_sea_handler);

	vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_MMIO);
	TEST_ASSERT_EQ(run->mmio.phys_addr, MMIO_ADDR);
	TEST_ASSERT_EQ(run->mmio.len, sizeof(unsigned long));
	TEST_ASSERT(!run->mmio.is_write, "Expected MMIO read");

	vcpu_inject_sea(vcpu);
	vcpu_run_expect_done(vcpu);
	kvm_vm_free(vm);
}

static void test_serror_amo_guest(void)
{
	/*
	 * The ISB is entirely unnecessary (and highlights how FEAT_NV2 is borked)
	 * since the write is redirected to memory. But don't write (intentionally)
	 * broken code!
	 */
	sysreg_clear_set(hcr_el2, HCR_EL2_AMO | HCR_EL2_TGE, 0);
	isb();

	GUEST_SYNC(0);
	GUEST_ASSERT(read_sysreg(isr_el1) & ISR_EL1_A);

	/*
	 * KVM treats the effective value of AMO as 1 when
	 * HCR_EL2.{E2H,TGE} = {1, 0}, meaning the SError will be taken when
	 * unmasked.
	 */
	local_serror_enable();
	isb();
	local_serror_disable();

	GUEST_FAIL("Should've taken pending SError exception");
}

static void test_serror_amo(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_serror_amo_guest,
							unexpected_dabt_handler);

	vm_install_exception_handler(vm, VECTOR_ERROR_CURRENT, expect_serror_handler);
	vcpu_run_expect_sync(vcpu);
	vcpu_inject_serror(vcpu);
	vcpu_run_expect_done(vcpu);
	kvm_vm_free(vm);
}
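
/*
 * test_serror_amo() pokes HCR_EL2 from within the guest, which requires
 * running the guest at vEL2 (nested virt); hence the test_supports_el2()
 * gate below.
 */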
int main(void)
{
	test_mmio_abort();
	test_mmio_nisv();
	test_mmio_nisv_abort();
	test_serror();
	test_serror_masked();
	test_serror_emulated();
	test_mmio_ease();
	test_s1ptw_abort();

	if (!test_supports_el2())
		return 0;

	test_serror_amo();
}