GitHub Repository: torvalds/linux
Path: blob/master/tools/testing/selftests/kvm/arm64/page_fault_test.c
// SPDX-License-Identifier: GPL-2.0
/*
 * page_fault_test.c - Test stage 2 faults.
 *
 * This test tries different combinations of guest accesses (e.g., write,
 * S1PTW), backing source type (e.g., anon) and types of faults (e.g., read on
 * hugetlbfs with a hole). It checks that the expected handling method is
 * called (e.g., uffd faults with the right address and write/read flag).
 */
#include <linux/bitmap.h>
#include <fcntl.h>
#include <test_util.h>
#include <kvm_util.h>
#include <processor.h>
#include <asm/sysreg.h>
#include <linux/bitfield.h>
#include "guest_modes.h"
#include "userfaultfd_util.h"

/* Guest virtual addresses that point to the test page and its PTE. */
#define TEST_GVA 0xc0000000
#define TEST_EXEC_GVA (TEST_GVA + 0x8)
#define TEST_PTE_GVA 0xb0000000
#define TEST_DATA 0x0123456789ABCDEF

static uint64_t *guest_test_memory = (uint64_t *)TEST_GVA;

#define CMD_NONE (0)
#define CMD_SKIP_TEST (1ULL << 1)
#define CMD_HOLE_PT (1ULL << 2)
#define CMD_HOLE_DATA (1ULL << 3)
#define CMD_CHECK_WRITE_IN_DIRTY_LOG (1ULL << 4)
#define CMD_CHECK_S1PTW_WR_IN_DIRTY_LOG (1ULL << 5)
#define CMD_CHECK_NO_WRITE_IN_DIRTY_LOG (1ULL << 6)
#define CMD_CHECK_NO_S1PTW_WR_IN_DIRTY_LOG (1ULL << 7)
#define CMD_SET_PTE_AF (1ULL << 8)

#define PREPARE_FN_NR 10
#define CHECK_FN_NR 10

static struct event_cnt {
        int mmio_exits;
        int fail_vcpu_runs;
        int uffd_faults;
        /* uffd_faults is incremented from multiple threads. */
        pthread_mutex_t uffd_faults_mutex;
} events;

struct test_desc {
        const char *name;
        uint64_t mem_mark_cmd;
        /* Skip the test if any prepare function returns false */
        bool (*guest_prepare[PREPARE_FN_NR])(void);
        void (*guest_test)(void);
        void (*guest_test_check[CHECK_FN_NR])(void);
        uffd_handler_t uffd_pt_handler;
        uffd_handler_t uffd_data_handler;
        void (*dabt_handler)(struct ex_regs *regs);
        void (*iabt_handler)(struct ex_regs *regs);
        void (*mmio_handler)(struct kvm_vm *vm, struct kvm_run *run);
        void (*fail_vcpu_run_handler)(int ret);
        uint32_t pt_memslot_flags;
        uint32_t data_memslot_flags;
        bool skip;
        struct event_cnt expected_events;
};

struct test_params {
        enum vm_mem_backing_src_type src_type;
        struct test_desc *test_desc;
};

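/*
 * Invalidate the stage 1 TLB entry for @vaddr on all CPUs in the Inner
 * Shareable domain. The leading DSB makes any PTE update visible to the
 * table walker, and the trailing DSB + ISB ensure the invalidation has
 * completed before the caller's next access. TLBI VAAE1IS takes VA[55:12]
 * in its register operand, hence the shift by 12.
 */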
static inline void flush_tlb_page(uint64_t vaddr)
{
        uint64_t page = vaddr >> 12;

        dsb(ishst);
        asm volatile("tlbi vaae1is, %0" :: "r" (page));
        dsb(ish);
        isb();
}

static void guest_write64(void)
{
        uint64_t val;

        WRITE_ONCE(*guest_test_memory, TEST_DATA);
        val = READ_ONCE(*guest_test_memory);
        GUEST_ASSERT_EQ(val, TEST_DATA);
}

/* Check the system for atomic instructions. */
static bool guest_check_lse(void)
{
        uint64_t isar0 = read_sysreg(id_aa64isar0_el1);
        uint64_t atomic;

        atomic = FIELD_GET(ID_AA64ISAR0_EL1_ATOMIC, isar0);
        return atomic >= 2;
}

static bool guest_check_dc_zva(void)
{
        uint64_t dczid = read_sysreg(dczid_el0);
        uint64_t dzp = FIELD_GET(DCZID_EL0_DZP, dczid);

        return dzp == 0;
}

/* Compare and swap instruction. */
static void guest_cas(void)
{
        uint64_t val;

        GUEST_ASSERT(guest_check_lse());
        asm volatile(".arch_extension lse\n"
                     "casal %0, %1, [%2]\n"
                     :: "r" (0ul), "r" (TEST_DATA), "r" (guest_test_memory));
        val = READ_ONCE(*guest_test_memory);
        GUEST_ASSERT_EQ(val, TEST_DATA);
}

static void guest_read64(void)
{
        uint64_t val;

        val = READ_ONCE(*guest_test_memory);
        GUEST_ASSERT_EQ(val, 0);
}

/* Address translation instruction */
static void guest_at(void)
{
        uint64_t par;

        asm volatile("at s1e1r, %0" :: "r" (guest_test_memory));
        isb();
        par = read_sysreg(par_el1);

        /* PAR_EL1.F (bit 0) is clear when the address translation succeeded */
        GUEST_ASSERT_EQ(par & 1, 0);
}

/*
 * The size of the block written by "dc zva" is guaranteed to be between (2 <<
 * 0) and (2 << 9), which is safe in our case as we need the write to happen
 * for at least a word, and not more than a page.
 */
static void guest_dc_zva(void)
{
        uint16_t val;

        asm volatile("dc zva, %0" :: "r" (guest_test_memory));
        dsb(ish);
        val = READ_ONCE(*guest_test_memory);
        GUEST_ASSERT_EQ(val, 0);
}

/*
 * Pre-indexing loads and stores don't have a valid syndrome (ESR_EL2.ISV==0).
 * And that's special because KVM must take special care with those: they
 * should still count as accesses for dirty logging or user-faulting, but
 * should be handled differently on mmio.
 */
static void guest_ld_preidx(void)
{
        uint64_t val;
        uint64_t addr = TEST_GVA - 8;

        /*
         * This ends up accessing "TEST_GVA + 8 - 8", where "TEST_GVA - 8" is
         * in a gap between memslots not backed by anything.
         */
        asm volatile("ldr %0, [%1, #8]!"
                     : "=r" (val), "+r" (addr));
        GUEST_ASSERT_EQ(val, 0);
        GUEST_ASSERT_EQ(addr, TEST_GVA);
}

static void guest_st_preidx(void)
{
        uint64_t val = TEST_DATA;
        uint64_t addr = TEST_GVA - 8;

        asm volatile("str %0, [%1, #8]!"
                     : "+r" (val), "+r" (addr));

        GUEST_ASSERT_EQ(addr, TEST_GVA);
        val = READ_ONCE(*guest_test_memory);
}

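/*
 * Enable hardware management of the Access Flag (FEAT_HAFDBS) by setting
 * TCR_EL1.HA, so the MMU sets PTE_AF on access instead of taking an Access
 * Flag fault. Returns false (test skipped) if the CPU doesn't implement it.
 */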
static bool guest_set_ha(void)
{
        uint64_t mmfr1 = read_sysreg(id_aa64mmfr1_el1);
        uint64_t hadbs, tcr;

        /* Skip if HA is not supported. */
        hadbs = FIELD_GET(ID_AA64MMFR1_EL1_HAFDBS, mmfr1);
        if (hadbs == 0)
                return false;

        tcr = read_sysreg(tcr_el1) | TCR_HA;
        write_sysreg(tcr, tcr_el1);
        isb();

        return true;
}

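/*
 * Clear the Access Flag in the test page's PTE (reachable through the
 * TEST_PTE_GVA alias set up by setup_gva_maps()) and drop the stale TLB
 * entry, so the next access to TEST_GVA must set the AF again.
 */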
static bool guest_clear_pte_af(void)
{
        *((uint64_t *)TEST_PTE_GVA) &= ~PTE_AF;
        flush_tlb_page(TEST_GVA);

        return true;
}

static void guest_check_pte_af(void)
{
        dsb(ish);
        GUEST_ASSERT_EQ(*((uint64_t *)TEST_PTE_GVA) & PTE_AF, PTE_AF);
}

static void guest_check_write_in_dirty_log(void)
{
        GUEST_SYNC(CMD_CHECK_WRITE_IN_DIRTY_LOG);
}

static void guest_check_no_write_in_dirty_log(void)
{
        GUEST_SYNC(CMD_CHECK_NO_WRITE_IN_DIRTY_LOG);
}

static void guest_check_s1ptw_wr_in_dirty_log(void)
{
        GUEST_SYNC(CMD_CHECK_S1PTW_WR_IN_DIRTY_LOG);
}

static void guest_check_no_s1ptw_wr_in_dirty_log(void)
{
        GUEST_SYNC(CMD_CHECK_NO_S1PTW_WR_IN_DIRTY_LOG);
}

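/*
 * Execute from the test page: load_exec_code_for_test() places a tiny
 * function at TEST_EXEC_GVA that returns 0x77, so calling it triggers an
 * instruction fetch (exec) access on the test-data memslot.
 */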
static void guest_exec(void)
{
        int (*code)(void) = (int (*)(void))TEST_EXEC_GVA;
        int ret;

        ret = code();
        GUEST_ASSERT_EQ(ret, 0x77);
}

static bool guest_prepare(struct test_desc *test)
{
        bool (*prepare_fn)(void);
        int i;

        for (i = 0; i < PREPARE_FN_NR; i++) {
                prepare_fn = test->guest_prepare[i];
                if (prepare_fn && !prepare_fn())
                        return false;
        }

        return true;
}

static void guest_test_check(struct test_desc *test)
{
        void (*check_fn)(void);
        int i;

        for (i = 0; i < CHECK_FN_NR; i++) {
                check_fn = test->guest_test_check[i];
                if (check_fn)
                        check_fn();
        }
}

static void guest_code(struct test_desc *test)
{
        if (!guest_prepare(test))
                GUEST_SYNC(CMD_SKIP_TEST);

        GUEST_SYNC(test->mem_mark_cmd);

        if (test->guest_test)
                test->guest_test();

        guest_test_check(test);
        GUEST_DONE();
}

static void no_dabt_handler(struct ex_regs *regs)
{
        GUEST_FAIL("Unexpected dabt, far_el1 = 0x%lx", read_sysreg(far_el1));
}

static void no_iabt_handler(struct ex_regs *regs)
{
        GUEST_FAIL("Unexpected iabt, pc = 0x%lx", regs->pc);
}

static struct uffd_args {
        char *copy;
        void *hva;
        uint64_t paging_size;
} pt_args, data_args;

/* Returns 0 on success, or -1 if the UFFDIO_COPY failed. */
static int uffd_generic_handler(int uffd_mode, int uffd, struct uffd_msg *msg,
                                struct uffd_args *args)
{
        uint64_t addr = msg->arg.pagefault.address;
        uint64_t flags = msg->arg.pagefault.flags;
        struct uffdio_copy copy;
        int ret;

        TEST_ASSERT(uffd_mode == UFFDIO_REGISTER_MODE_MISSING,
                    "The only expected UFFD mode is MISSING");
        TEST_ASSERT_EQ(addr, (uint64_t)args->hva);

        pr_debug("uffd fault: addr=%p write=%d\n",
                 (void *)addr, !!(flags & UFFD_PAGEFAULT_FLAG_WRITE));

        copy.src = (uint64_t)args->copy;
        copy.dst = addr;
        copy.len = args->paging_size;
        copy.mode = 0;

        ret = ioctl(uffd, UFFDIO_COPY, &copy);
        if (ret == -1) {
                pr_info("Failed UFFDIO_COPY in 0x%lx with errno: %d\n",
                        addr, errno);
                return ret;
        }

        pthread_mutex_lock(&events.uffd_faults_mutex);
        events.uffd_faults += 1;
        pthread_mutex_unlock(&events.uffd_faults_mutex);
        return 0;
}

static int uffd_pt_handler(int mode, int uffd, struct uffd_msg *msg)
{
        return uffd_generic_handler(mode, uffd, msg, &pt_args);
}

static int uffd_data_handler(int mode, int uffd, struct uffd_msg *msg)
{
        return uffd_generic_handler(mode, uffd, msg, &data_args);
}

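/*
 * Record the memslot's HVA/size and snapshot its current contents; the uffd
 * handlers later use this copy as the UFFDIO_COPY source to repopulate pages
 * after a hole has been punched in the backing store.
 */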
static void setup_uffd_args(struct userspace_mem_region *region,
                            struct uffd_args *args)
{
        args->hva = (void *)region->region.userspace_addr;
        args->paging_size = region->region.memory_size;

        args->copy = malloc(args->paging_size);
        TEST_ASSERT(args->copy, "Failed to allocate data copy.");
        memcpy(args->copy, args->hva, args->paging_size);
}

static void setup_uffd(struct kvm_vm *vm, struct test_params *p,
                       struct uffd_desc **pt_uffd, struct uffd_desc **data_uffd)
{
        struct test_desc *test = p->test_desc;
        int uffd_mode = UFFDIO_REGISTER_MODE_MISSING;

        setup_uffd_args(vm_get_mem_region(vm, MEM_REGION_PT), &pt_args);
        setup_uffd_args(vm_get_mem_region(vm, MEM_REGION_TEST_DATA), &data_args);

        *pt_uffd = NULL;
        if (test->uffd_pt_handler)
                *pt_uffd = uffd_setup_demand_paging(uffd_mode, 0,
                                                    pt_args.hva,
                                                    pt_args.paging_size,
                                                    1, test->uffd_pt_handler);

        *data_uffd = NULL;
        if (test->uffd_data_handler)
                *data_uffd = uffd_setup_demand_paging(uffd_mode, 0,
                                                      data_args.hva,
                                                      data_args.paging_size,
                                                      1, test->uffd_data_handler);
}

static void free_uffd(struct test_desc *test, struct uffd_desc *pt_uffd,
                      struct uffd_desc *data_uffd)
{
        if (test->uffd_pt_handler)
                uffd_stop_demand_paging(pt_uffd);
        if (test->uffd_data_handler)
                uffd_stop_demand_paging(data_uffd);

        free(pt_args.copy);
        free(data_args.copy);
}

static int uffd_no_handler(int mode, int uffd, struct uffd_msg *msg)
{
        TEST_FAIL("There was no UFFD fault expected.");
        return -1;
}

/*
 * Punch a hole in the region's backing store: fallocate(FALLOC_FL_PUNCH_HOLE)
 * for fd-backed memory, MADV_DONTNEED for anonymous memory. Failures are
 * fatal, so this always returns true (continue the test).
 */
static bool punch_hole_in_backing_store(struct kvm_vm *vm,
                                        struct userspace_mem_region *region)
{
        void *hva = (void *)region->region.userspace_addr;
        uint64_t paging_size = region->region.memory_size;
        int ret, fd = region->fd;

        if (fd != -1) {
                ret = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                                0, paging_size);
                TEST_ASSERT(ret == 0, "fallocate failed");
        } else {
                ret = madvise(hva, paging_size, MADV_DONTNEED);
                TEST_ASSERT(ret == 0, "madvise failed");
        }

        return true;
}

static void mmio_on_test_gpa_handler(struct kvm_vm *vm, struct kvm_run *run)
{
        struct userspace_mem_region *region;
        void *hva;

        region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
        hva = (void *)region->region.userspace_addr;

        TEST_ASSERT_EQ(run->mmio.phys_addr, region->region.guest_phys_addr);

        memcpy(hva, run->mmio.data, run->mmio.len);
        events.mmio_exits += 1;
}

static void mmio_no_handler(struct kvm_vm *vm, struct kvm_run *run)
{
        uint64_t data;

        memcpy(&data, run->mmio.data, sizeof(data));
        pr_debug("addr=%lld len=%d w=%d data=%lx\n",
                 run->mmio.phys_addr, run->mmio.len,
                 run->mmio.is_write, data);
        TEST_FAIL("There was no MMIO exit expected.");
}

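/*
 * Fetch the dirty log for @region and return whether host page number
 * @host_pg_nr (an offset into the region, in host-page-size units) is
 * marked dirty.
 */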
static bool check_write_in_dirty_log(struct kvm_vm *vm,
                                     struct userspace_mem_region *region,
                                     uint64_t host_pg_nr)
{
        unsigned long *bmap;
        bool first_page_dirty;
        uint64_t size = region->region.memory_size;

        /* getpagesize() is not always equal to vm->page_size */
        bmap = bitmap_zalloc(size / getpagesize());
        kvm_vm_get_dirty_log(vm, region->region.slot, bmap);
        first_page_dirty = test_bit(host_pg_nr, bmap);
        free(bmap);
        return first_page_dirty;
}

/* Returns true to continue the test, and false if it should be skipped. */
static bool handle_cmd(struct kvm_vm *vm, int cmd)
{
        struct userspace_mem_region *data_region, *pt_region;
        bool continue_test = true;
        uint64_t pte_gpa, pte_pg;

        data_region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
        pt_region = vm_get_mem_region(vm, MEM_REGION_PT);
        pte_gpa = addr_hva2gpa(vm, virt_get_pte_hva(vm, TEST_GVA));
        pte_pg = (pte_gpa - pt_region->region.guest_phys_addr) / getpagesize();

        if (cmd == CMD_SKIP_TEST)
                continue_test = false;

        if (cmd & CMD_HOLE_PT)
                continue_test = punch_hole_in_backing_store(vm, pt_region);
        if (cmd & CMD_HOLE_DATA)
                continue_test = punch_hole_in_backing_store(vm, data_region);
        if (cmd & CMD_CHECK_WRITE_IN_DIRTY_LOG)
                TEST_ASSERT(check_write_in_dirty_log(vm, data_region, 0),
                            "Missing write in dirty log");
        if (cmd & CMD_CHECK_S1PTW_WR_IN_DIRTY_LOG)
                TEST_ASSERT(check_write_in_dirty_log(vm, pt_region, pte_pg),
                            "Missing s1ptw write in dirty log");
        if (cmd & CMD_CHECK_NO_WRITE_IN_DIRTY_LOG)
                TEST_ASSERT(!check_write_in_dirty_log(vm, data_region, 0),
                            "Unexpected write in dirty log");
        if (cmd & CMD_CHECK_NO_S1PTW_WR_IN_DIRTY_LOG)
                TEST_ASSERT(!check_write_in_dirty_log(vm, pt_region, pte_pg),
                            "Unexpected s1ptw write in dirty log");

        return continue_test;
}

void fail_vcpu_run_no_handler(int ret)
{
        TEST_FAIL("Unexpected vcpu run failure");
}

void fail_vcpu_run_mmio_no_syndrome_handler(int ret)
{
        TEST_ASSERT(errno == ENOSYS,
                    "The mmio handler should have returned not implemented.");
        events.fail_vcpu_runs += 1;
}

typedef uint32_t aarch64_insn_t;
extern aarch64_insn_t __exec_test[2];

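/*
 * Host-side container for the two instructions ("mov x0, #0x77; ret") that
 * the guest runs in the exec tests; load_exec_code_for_test() copies them
 * from __exec_test into the test-data memslot at TEST_EXEC_GVA.
 */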
noinline void __return_0x77(void)
{
        asm volatile("__exec_test: mov x0, #0x77\n"
                     "ret\n");
}

/*
 * Note that this function runs on the host before the test VM starts: there's
 * no need to sync the D$ and I$ caches.
 */
static void load_exec_code_for_test(struct kvm_vm *vm)
{
        uint64_t *code;
        struct userspace_mem_region *region;
        void *hva;

        region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
        hva = (void *)region->region.userspace_addr;

        assert(TEST_EXEC_GVA > TEST_GVA);
        code = hva + TEST_EXEC_GVA - TEST_GVA;
        memcpy(code, __exec_test, sizeof(__exec_test));
}

static void setup_abort_handlers(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
                                 struct test_desc *test)
{
        vm_init_descriptor_tables(vm);
        vcpu_init_descriptor_tables(vcpu);

        vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
                                ESR_ELx_EC_DABT_CUR, no_dabt_handler);
        vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
                                ESR_ELx_EC_IABT_CUR, no_iabt_handler);
}

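/*
 * Build the guest stage 1 mappings used by the test: TEST_GVA maps to the
 * test-data memslot, and TEST_PTE_GVA maps to the page holding TEST_GVA's
 * own PTE so the guest can inspect and modify that PTE (e.g., the AF bit).
 */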
static void setup_gva_maps(struct kvm_vm *vm)
{
        struct userspace_mem_region *region;
        uint64_t pte_gpa;

        region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
        /* Map TEST_GVA first. This will install a new PTE. */
        virt_pg_map(vm, TEST_GVA, region->region.guest_phys_addr);
        /* Then map TEST_PTE_GVA to the above PTE. */
        pte_gpa = addr_hva2gpa(vm, virt_get_pte_hva(vm, TEST_GVA));
        virt_pg_map(vm, TEST_PTE_GVA, pte_gpa);
}

enum pf_test_memslots {
        CODE_AND_DATA_MEMSLOT,
        PAGE_TABLE_MEMSLOT,
        TEST_DATA_MEMSLOT,
};

/*
 * Create a memslot for code and data at pfn=0, and test-data and PT ones
 * at max_gfn.
 */
static void setup_memslots(struct kvm_vm *vm, struct test_params *p)
{
        uint64_t backing_src_pagesz = get_backing_src_pagesz(p->src_type);
        uint64_t guest_page_size = vm->page_size;
        uint64_t max_gfn = vm_compute_max_gfn(vm);
        /* Enough for 2M of code when using 4K guest pages. */
        uint64_t code_npages = 512;
        uint64_t pt_size, data_size, data_gpa;

        /*
         * This test requires 1 pgd, 2 pud, 4 pmd, and 6 pte pages when using
         * VM_MODE_P48V48_4K. Note that the .text takes ~1.6MBs. That's 13
         * pages. VM_MODE_P48V48_4K is the mode with most PT pages; let's use
         * twice that just in case.
         */
        pt_size = 26 * guest_page_size;

        /* memslot sizes and gpa's must be aligned to the backing page size */
        pt_size = align_up(pt_size, backing_src_pagesz);
        data_size = align_up(guest_page_size, backing_src_pagesz);
        data_gpa = (max_gfn * guest_page_size) - data_size;
        data_gpa = align_down(data_gpa, backing_src_pagesz);

        vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0,
                                    CODE_AND_DATA_MEMSLOT, code_npages, 0);
        vm->memslots[MEM_REGION_CODE] = CODE_AND_DATA_MEMSLOT;
        vm->memslots[MEM_REGION_DATA] = CODE_AND_DATA_MEMSLOT;

        vm_userspace_mem_region_add(vm, p->src_type, data_gpa - pt_size,
                                    PAGE_TABLE_MEMSLOT, pt_size / guest_page_size,
                                    p->test_desc->pt_memslot_flags);
        vm->memslots[MEM_REGION_PT] = PAGE_TABLE_MEMSLOT;

        vm_userspace_mem_region_add(vm, p->src_type, data_gpa, TEST_DATA_MEMSLOT,
                                    data_size / guest_page_size,
                                    p->test_desc->data_memslot_flags);
        vm->memslots[MEM_REGION_TEST_DATA] = TEST_DATA_MEMSLOT;
}

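/*
 * Place the ucall MMIO page right after the test-data region, in a GPA range
 * not covered by any memslot, so that ucalls reach userspace as MMIO exits.
 */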
static void setup_ucall(struct kvm_vm *vm)
{
        struct userspace_mem_region *region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);

        ucall_init(vm, region->region.guest_phys_addr + region->region.memory_size);
}

static void setup_default_handlers(struct test_desc *test)
{
        if (!test->mmio_handler)
                test->mmio_handler = mmio_no_handler;

        if (!test->fail_vcpu_run_handler)
                test->fail_vcpu_run_handler = fail_vcpu_run_no_handler;
}

static void check_event_counts(struct test_desc *test)
{
        TEST_ASSERT_EQ(test->expected_events.uffd_faults, events.uffd_faults);
        TEST_ASSERT_EQ(test->expected_events.mmio_exits, events.mmio_exits);
        TEST_ASSERT_EQ(test->expected_events.fail_vcpu_runs, events.fail_vcpu_runs);
}

static void print_test_banner(enum vm_guest_mode mode, struct test_params *p)
{
        struct test_desc *test = p->test_desc;

        pr_debug("Test: %s\n", test->name);
        pr_debug("Testing guest mode: %s\n", vm_guest_mode_string(mode));
        pr_debug("Testing memory backing src type: %s\n",
                 vm_mem_backing_src_alias(p->src_type)->name);
}

static void reset_event_counts(void)
{
        memset(&events, 0, sizeof(events));
}

/*
 * This function either succeeds, skips the test (after setting test->skip), or
 * fails with a TEST_FAIL that aborts all tests.
 */
static void vcpu_run_loop(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
                          struct test_desc *test)
{
        struct kvm_run *run;
        struct ucall uc;
        int ret;

        run = vcpu->run;

        for (;;) {
                ret = _vcpu_run(vcpu);
                if (ret) {
                        test->fail_vcpu_run_handler(ret);
                        goto done;
                }

                switch (get_ucall(vcpu, &uc)) {
                case UCALL_SYNC:
                        if (!handle_cmd(vm, uc.args[1])) {
                                test->skip = true;
                                goto done;
                        }
                        break;
                case UCALL_ABORT:
                        REPORT_GUEST_ASSERT(uc);
                        break;
                case UCALL_DONE:
                        goto done;
                case UCALL_NONE:
                        if (run->exit_reason == KVM_EXIT_MMIO)
                                test->mmio_handler(vm, run);
                        break;
                default:
                        TEST_FAIL("Unknown ucall %lu", uc.cmd);
                }
        }

done:
        pr_debug(test->skip ? "Skipped.\n" : "Done.\n");
}

static void run_test(enum vm_guest_mode mode, void *arg)
{
        struct test_params *p = (struct test_params *)arg;
        struct test_desc *test = p->test_desc;
        struct kvm_vm *vm;
        struct kvm_vcpu *vcpu;
        struct uffd_desc *pt_uffd, *data_uffd;

        print_test_banner(mode, p);

        vm = ____vm_create(VM_SHAPE(mode));
        setup_memslots(vm, p);
        kvm_vm_elf_load(vm, program_invocation_name);
        setup_ucall(vm);
        vcpu = vm_vcpu_add(vm, 0, guest_code);

        setup_gva_maps(vm);

        reset_event_counts();

        /*
         * Set some code in the data memslot for the guest to execute (only
         * applicable to the EXEC tests). This has to be done before
         * setup_uffd() as that function copies the memslot data for the uffd
         * handler.
         */
        load_exec_code_for_test(vm);
        setup_uffd(vm, p, &pt_uffd, &data_uffd);
        setup_abort_handlers(vm, vcpu, test);
        setup_default_handlers(test);
        vcpu_args_set(vcpu, 1, test);

        vcpu_run_loop(vm, vcpu, test);

        kvm_vm_free(vm);
        free_uffd(test, pt_uffd, data_uffd);

        /*
         * Make sure we check the events after the uffd threads have exited,
         * which means they updated their respective event counters.
         */
        if (!test->skip)
                check_event_counts(test);
}

static void help(char *name)
{
        puts("");
        printf("usage: %s [-h] [-s mem-type]\n", name);
        puts("");
        guest_modes_help();
        backing_src_help("-s");
        puts("");
}

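/*
 * Name-building helpers: paste their arguments together with '_' and
 * stringify the result, producing the .name strings for the test
 * descriptors below.
 */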
#define SNAME(s) #s
#define SCAT2(a, b) SNAME(a ## _ ## b)
#define SCAT3(a, b, c) SCAT2(a, SCAT2(b, c))
#define SCAT4(a, b, c, d) SCAT2(a, SCAT3(b, c, d))

#define _CHECK(_test) _CHECK_##_test
#define _PREPARE(_test) _PREPARE_##_test
#define _PREPARE_guest_read64 NULL
#define _PREPARE_guest_ld_preidx NULL
#define _PREPARE_guest_write64 NULL
#define _PREPARE_guest_st_preidx NULL
#define _PREPARE_guest_exec NULL
#define _PREPARE_guest_at NULL
#define _PREPARE_guest_dc_zva guest_check_dc_zva
#define _PREPARE_guest_cas guest_check_lse

/* With or without access flag checks */
#define _PREPARE_with_af guest_set_ha, guest_clear_pte_af
#define _PREPARE_no_af NULL
#define _CHECK_with_af guest_check_pte_af
#define _CHECK_no_af NULL

/* Performs an access and checks that no faults were triggered. */
#define TEST_ACCESS(_access, _with_af, _mark_cmd) \
{ \
        .name = SCAT3(_access, _with_af, #_mark_cmd), \
        .guest_prepare = { _PREPARE(_with_af), \
                           _PREPARE(_access) }, \
        .mem_mark_cmd = _mark_cmd, \
        .guest_test = _access, \
        .guest_test_check = { _CHECK(_with_af) }, \
        .expected_events = { 0 }, \
}

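/*
 * Performs an access after the memory marked by _mark_cmd has been punched
 * out and registered with userfaultfd; expects exactly _uffd_faults faults
 * to be handled by the given data/PT handlers.
 */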
#define TEST_UFFD(_access, _with_af, _mark_cmd, \
                  _uffd_data_handler, _uffd_pt_handler, _uffd_faults) \
{ \
        .name = SCAT4(uffd, _access, _with_af, #_mark_cmd), \
        .guest_prepare = { _PREPARE(_with_af), \
                           _PREPARE(_access) }, \
        .guest_test = _access, \
        .mem_mark_cmd = _mark_cmd, \
        .guest_test_check = { _CHECK(_with_af) }, \
        .uffd_data_handler = _uffd_data_handler, \
        .uffd_pt_handler = _uffd_pt_handler, \
        .expected_events = { .uffd_faults = _uffd_faults, }, \
}

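/*
 * Performs an access with dirty logging enabled on both the data and PT
 * memslots; _test_check and _pt_check then verify the dirty log contents
 * from the guest (via GUEST_SYNC commands).
 */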
#define TEST_DIRTY_LOG(_access, _with_af, _test_check, _pt_check) \
{ \
        .name = SCAT3(dirty_log, _access, _with_af), \
        .data_memslot_flags = KVM_MEM_LOG_DIRTY_PAGES, \
        .pt_memslot_flags = KVM_MEM_LOG_DIRTY_PAGES, \
        .guest_prepare = { _PREPARE(_with_af), \
                           _PREPARE(_access) }, \
        .guest_test = _access, \
        .guest_test_check = { _CHECK(_with_af), _test_check, _pt_check }, \
        .expected_events = { 0 }, \
}

#define TEST_UFFD_AND_DIRTY_LOG(_access, _with_af, _uffd_data_handler, \
                                _uffd_faults, _test_check, _pt_check) \
{ \
        .name = SCAT3(uffd_and_dirty_log, _access, _with_af), \
        .data_memslot_flags = KVM_MEM_LOG_DIRTY_PAGES, \
        .pt_memslot_flags = KVM_MEM_LOG_DIRTY_PAGES, \
        .guest_prepare = { _PREPARE(_with_af), \
                           _PREPARE(_access) }, \
        .guest_test = _access, \
        .mem_mark_cmd = CMD_HOLE_DATA | CMD_HOLE_PT, \
        .guest_test_check = { _CHECK(_with_af), _test_check, _pt_check }, \
        .uffd_data_handler = _uffd_data_handler, \
        .uffd_pt_handler = uffd_pt_handler, \
        .expected_events = { .uffd_faults = _uffd_faults, }, \
}

#define TEST_RO_MEMSLOT(_access, _mmio_handler, _mmio_exits) \
{ \
        .name = SCAT2(ro_memslot, _access), \
        .data_memslot_flags = KVM_MEM_READONLY, \
        .pt_memslot_flags = KVM_MEM_READONLY, \
        .guest_prepare = { _PREPARE(_access) }, \
        .guest_test = _access, \
        .mmio_handler = _mmio_handler, \
        .expected_events = { .mmio_exits = _mmio_exits }, \
}

#define TEST_RO_MEMSLOT_NO_SYNDROME(_access) \
{ \
        .name = SCAT2(ro_memslot_no_syndrome, _access), \
        .data_memslot_flags = KVM_MEM_READONLY, \
        .pt_memslot_flags = KVM_MEM_READONLY, \
        .guest_prepare = { _PREPARE(_access) }, \
        .guest_test = _access, \
        .fail_vcpu_run_handler = fail_vcpu_run_mmio_no_syndrome_handler, \
        .expected_events = { .fail_vcpu_runs = 1 }, \
}

#define TEST_RO_MEMSLOT_AND_DIRTY_LOG(_access, _mmio_handler, _mmio_exits, \
                                      _test_check) \
{ \
        .name = SCAT2(ro_memslot, _access), \
        .data_memslot_flags = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES, \
        .pt_memslot_flags = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES, \
        .guest_prepare = { _PREPARE(_access) }, \
        .guest_test = _access, \
        .guest_test_check = { _test_check }, \
        .mmio_handler = _mmio_handler, \
        .expected_events = { .mmio_exits = _mmio_exits }, \
}

#define TEST_RO_MEMSLOT_NO_SYNDROME_AND_DIRTY_LOG(_access, _test_check) \
{ \
        .name = SCAT2(ro_memslot_no_syn_and_dlog, _access), \
        .data_memslot_flags = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES, \
        .pt_memslot_flags = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES, \
        .guest_prepare = { _PREPARE(_access) }, \
        .guest_test = _access, \
        .guest_test_check = { _test_check }, \
        .fail_vcpu_run_handler = fail_vcpu_run_mmio_no_syndrome_handler, \
        .expected_events = { .fail_vcpu_runs = 1 }, \
}

#define TEST_RO_MEMSLOT_AND_UFFD(_access, _mmio_handler, _mmio_exits, \
                                 _uffd_data_handler, _uffd_faults) \
{ \
        .name = SCAT2(ro_memslot_uffd, _access), \
        .data_memslot_flags = KVM_MEM_READONLY, \
        .pt_memslot_flags = KVM_MEM_READONLY, \
        .mem_mark_cmd = CMD_HOLE_DATA | CMD_HOLE_PT, \
        .guest_prepare = { _PREPARE(_access) }, \
        .guest_test = _access, \
        .uffd_data_handler = _uffd_data_handler, \
        .uffd_pt_handler = uffd_pt_handler, \
        .mmio_handler = _mmio_handler, \
        .expected_events = { .mmio_exits = _mmio_exits, \
                             .uffd_faults = _uffd_faults }, \
}

#define TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(_access, _uffd_data_handler, \
                                             _uffd_faults) \
{ \
        .name = SCAT2(ro_memslot_no_syndrome, _access), \
        .data_memslot_flags = KVM_MEM_READONLY, \
        .pt_memslot_flags = KVM_MEM_READONLY, \
        .mem_mark_cmd = CMD_HOLE_DATA | CMD_HOLE_PT, \
        .guest_prepare = { _PREPARE(_access) }, \
        .guest_test = _access, \
        .uffd_data_handler = _uffd_data_handler, \
        .uffd_pt_handler = uffd_pt_handler, \
        .fail_vcpu_run_handler = fail_vcpu_run_mmio_no_syndrome_handler, \
        .expected_events = { .fail_vcpu_runs = 1, \
                             .uffd_faults = _uffd_faults }, \
}

static struct test_desc tests[] = {

        /* Check that HW is setting the Access Flag (AF) (sanity checks). */
        TEST_ACCESS(guest_read64, with_af, CMD_NONE),
        TEST_ACCESS(guest_ld_preidx, with_af, CMD_NONE),
        TEST_ACCESS(guest_cas, with_af, CMD_NONE),
        TEST_ACCESS(guest_write64, with_af, CMD_NONE),
        TEST_ACCESS(guest_st_preidx, with_af, CMD_NONE),
        TEST_ACCESS(guest_dc_zva, with_af, CMD_NONE),
        TEST_ACCESS(guest_exec, with_af, CMD_NONE),

        /*
         * Punch a hole in the data backing store, and then try multiple
         * accesses: reads should return zeroes, and writes should
         * re-populate the page. Moreover, the test also checks that no
         * exception was generated in the guest. Note that this
         * reading/writing behavior is the same as reading/writing a
         * punched page (with fallocate(FALLOC_FL_PUNCH_HOLE)) from
         * userspace.
         */
        TEST_ACCESS(guest_read64, no_af, CMD_HOLE_DATA),
        TEST_ACCESS(guest_cas, no_af, CMD_HOLE_DATA),
        TEST_ACCESS(guest_ld_preidx, no_af, CMD_HOLE_DATA),
        TEST_ACCESS(guest_write64, no_af, CMD_HOLE_DATA),
        TEST_ACCESS(guest_st_preidx, no_af, CMD_HOLE_DATA),
        TEST_ACCESS(guest_at, no_af, CMD_HOLE_DATA),
        TEST_ACCESS(guest_dc_zva, no_af, CMD_HOLE_DATA),

        /*
         * Punch holes in the data and PT backing stores and mark them for
         * userfaultfd handling. This should result in 2 faults: the access
         * on the data backing store, and its respective S1 page table walk
         * (S1PTW).
         */
        TEST_UFFD(guest_read64, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
                  uffd_data_handler, uffd_pt_handler, 2),
        TEST_UFFD(guest_read64, no_af, CMD_HOLE_DATA | CMD_HOLE_PT,
                  uffd_data_handler, uffd_pt_handler, 2),
        TEST_UFFD(guest_cas, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
                  uffd_data_handler, uffd_pt_handler, 2),
        /*
         * Can't test guest_at with_af as it's IMPDEF whether the AF is set.
         * The S1PTW fault should still be marked as a write.
         */
        TEST_UFFD(guest_at, no_af, CMD_HOLE_DATA | CMD_HOLE_PT,
                  uffd_no_handler, uffd_pt_handler, 1),
        TEST_UFFD(guest_ld_preidx, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
                  uffd_data_handler, uffd_pt_handler, 2),
        TEST_UFFD(guest_write64, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
                  uffd_data_handler, uffd_pt_handler, 2),
        TEST_UFFD(guest_dc_zva, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
                  uffd_data_handler, uffd_pt_handler, 2),
        TEST_UFFD(guest_st_preidx, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
                  uffd_data_handler, uffd_pt_handler, 2),
        TEST_UFFD(guest_exec, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
                  uffd_data_handler, uffd_pt_handler, 2),

        /*
         * Try accesses when the data and PT memory regions are both
         * tracked for dirty logging.
         */
        TEST_DIRTY_LOG(guest_read64, with_af, guest_check_no_write_in_dirty_log,
                       guest_check_s1ptw_wr_in_dirty_log),
        TEST_DIRTY_LOG(guest_read64, no_af, guest_check_no_write_in_dirty_log,
                       guest_check_no_s1ptw_wr_in_dirty_log),
        TEST_DIRTY_LOG(guest_ld_preidx, with_af,
                       guest_check_no_write_in_dirty_log,
                       guest_check_s1ptw_wr_in_dirty_log),
        TEST_DIRTY_LOG(guest_at, no_af, guest_check_no_write_in_dirty_log,
                       guest_check_no_s1ptw_wr_in_dirty_log),
        TEST_DIRTY_LOG(guest_exec, with_af, guest_check_no_write_in_dirty_log,
                       guest_check_s1ptw_wr_in_dirty_log),
        TEST_DIRTY_LOG(guest_write64, with_af, guest_check_write_in_dirty_log,
                       guest_check_s1ptw_wr_in_dirty_log),
        TEST_DIRTY_LOG(guest_cas, with_af, guest_check_write_in_dirty_log,
                       guest_check_s1ptw_wr_in_dirty_log),
        TEST_DIRTY_LOG(guest_dc_zva, with_af, guest_check_write_in_dirty_log,
                       guest_check_s1ptw_wr_in_dirty_log),
        TEST_DIRTY_LOG(guest_st_preidx, with_af, guest_check_write_in_dirty_log,
                       guest_check_s1ptw_wr_in_dirty_log),

        /*
         * Access when the data and PT memory regions are both marked for
         * dirty logging and UFFD at the same time. The expected result is
         * that writes should mark the dirty log and trigger a userfaultfd
         * write fault. Reads/execs should result in a read userfaultfd
         * fault, and nothing in the dirty log. Any S1PTW should result in
         * a write in the dirty log and a userfaultfd write.
         */
        TEST_UFFD_AND_DIRTY_LOG(guest_read64, with_af,
                                uffd_data_handler, 2,
                                guest_check_no_write_in_dirty_log,
                                guest_check_s1ptw_wr_in_dirty_log),
        TEST_UFFD_AND_DIRTY_LOG(guest_read64, no_af,
                                uffd_data_handler, 2,
                                guest_check_no_write_in_dirty_log,
                                guest_check_no_s1ptw_wr_in_dirty_log),
        TEST_UFFD_AND_DIRTY_LOG(guest_ld_preidx, with_af,
                                uffd_data_handler,
                                2, guest_check_no_write_in_dirty_log,
                                guest_check_s1ptw_wr_in_dirty_log),
        TEST_UFFD_AND_DIRTY_LOG(guest_at, with_af, uffd_no_handler, 1,
                                guest_check_no_write_in_dirty_log,
                                guest_check_s1ptw_wr_in_dirty_log),
        TEST_UFFD_AND_DIRTY_LOG(guest_exec, with_af,
                                uffd_data_handler, 2,
                                guest_check_no_write_in_dirty_log,
                                guest_check_s1ptw_wr_in_dirty_log),
        TEST_UFFD_AND_DIRTY_LOG(guest_write64, with_af,
                                uffd_data_handler,
                                2, guest_check_write_in_dirty_log,
                                guest_check_s1ptw_wr_in_dirty_log),
        TEST_UFFD_AND_DIRTY_LOG(guest_cas, with_af,
                                uffd_data_handler, 2,
                                guest_check_write_in_dirty_log,
                                guest_check_s1ptw_wr_in_dirty_log),
        TEST_UFFD_AND_DIRTY_LOG(guest_dc_zva, with_af,
                                uffd_data_handler,
                                2, guest_check_write_in_dirty_log,
                                guest_check_s1ptw_wr_in_dirty_log),
        TEST_UFFD_AND_DIRTY_LOG(guest_st_preidx, with_af,
                                uffd_data_handler, 2,
                                guest_check_write_in_dirty_log,
                                guest_check_s1ptw_wr_in_dirty_log),
        /*
         * Access when both the PT and data regions are marked read-only
         * (with KVM_MEM_READONLY). Writes with a syndrome result in an
         * MMIO exit, writes with no syndrome (e.g., CAS) result in a
         * failed vcpu run, and reads/execs with and without syndromes do
         * not fault.
         */
        TEST_RO_MEMSLOT(guest_read64, 0, 0),
        TEST_RO_MEMSLOT(guest_ld_preidx, 0, 0),
        TEST_RO_MEMSLOT(guest_at, 0, 0),
        TEST_RO_MEMSLOT(guest_exec, 0, 0),
        TEST_RO_MEMSLOT(guest_write64, mmio_on_test_gpa_handler, 1),
        TEST_RO_MEMSLOT_NO_SYNDROME(guest_dc_zva),
        TEST_RO_MEMSLOT_NO_SYNDROME(guest_cas),
        TEST_RO_MEMSLOT_NO_SYNDROME(guest_st_preidx),

        /*
         * The PT and data regions are both read-only and marked
         * for dirty logging at the same time. The expected result is that
         * for writes there should be no write in the dirty log. The
         * readonly handling is the same as if the memslot was not marked
         * for dirty logging: writes with a syndrome result in an MMIO
         * exit, and writes with no syndrome result in a failed vcpu run.
         */
        TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_read64, 0, 0,
                                      guest_check_no_write_in_dirty_log),
        TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_ld_preidx, 0, 0,
                                      guest_check_no_write_in_dirty_log),
        TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_at, 0, 0,
                                      guest_check_no_write_in_dirty_log),
        TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_exec, 0, 0,
                                      guest_check_no_write_in_dirty_log),
        TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_write64, mmio_on_test_gpa_handler,
                                      1, guest_check_no_write_in_dirty_log),
        TEST_RO_MEMSLOT_NO_SYNDROME_AND_DIRTY_LOG(guest_dc_zva,
                                                  guest_check_no_write_in_dirty_log),
        TEST_RO_MEMSLOT_NO_SYNDROME_AND_DIRTY_LOG(guest_cas,
                                                  guest_check_no_write_in_dirty_log),
        TEST_RO_MEMSLOT_NO_SYNDROME_AND_DIRTY_LOG(guest_st_preidx,
                                                  guest_check_no_write_in_dirty_log),

        /*
         * The PT and data regions are both read-only and punched with
         * holes tracked with userfaultfd. The expected result is the
         * union of both userfaultfd and read-only behaviors. For example,
         * write accesses result in a userfaultfd write fault and an MMIO
         * exit. Writes with no syndrome result in a failed vcpu run and
         * no userfaultfd write fault. Reads result in userfaultfd getting
         * triggered.
         */
        TEST_RO_MEMSLOT_AND_UFFD(guest_read64, 0, 0, uffd_data_handler, 2),
        TEST_RO_MEMSLOT_AND_UFFD(guest_ld_preidx, 0, 0, uffd_data_handler, 2),
        TEST_RO_MEMSLOT_AND_UFFD(guest_at, 0, 0, uffd_no_handler, 1),
        TEST_RO_MEMSLOT_AND_UFFD(guest_exec, 0, 0, uffd_data_handler, 2),
        TEST_RO_MEMSLOT_AND_UFFD(guest_write64, mmio_on_test_gpa_handler, 1,
                                 uffd_data_handler, 2),
        TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_cas, uffd_data_handler, 2),
        TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_dc_zva, uffd_no_handler, 1),
        TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_st_preidx, uffd_no_handler, 1),

        { 0 }
};

static void for_each_test_and_guest_mode(enum vm_mem_backing_src_type src_type)
{
        struct test_desc *t;

        for (t = &tests[0]; t->name; t++) {
                if (t->skip)
                        continue;

                struct test_params p = {
                        .src_type = src_type,
                        .test_desc = t,
                };

                for_each_guest_mode(run_test, &p);
        }
}

int main(int argc, char *argv[])
{
        enum vm_mem_backing_src_type src_type;
        int opt;

        src_type = DEFAULT_VM_MEM_SRC;

        while ((opt = getopt(argc, argv, "hm:s:")) != -1) {
                switch (opt) {
                case 'm':
                        guest_modes_cmdline(optarg);
                        break;
                case 's':
                        src_type = parse_backing_src_type(optarg);
                        break;
                case 'h':
                default:
                        help(argv[0]);
                        exit(0);
                }
        }

        for_each_test_and_guest_mode(src_type);
        return 0;
}