GitHub Repository: torvalds/linux
Path: blob/master/tools/testing/selftests/iommu/iommufd_fail_nth.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 *
 * These tests are "kernel integrity" tests. They are looking for kernel
 * WARN/OOPS/kasan/etc splats triggered by kernel sanitizers & debugging
 * features. They do not attempt to verify that the system calls are doing
 * what they are supposed to do.
 *
 * The basic philosophy is to run a sequence of calls that will succeed and
 * then sweep every failure injection point on that call chain to look for
 * interesting things in error handling.
 *
 * If something is actually going wrong, this test is best run with:
 *   echo 1 > /proc/sys/kernel/panic_on_warn
 */
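/*
 * Environment note (an assumption about the build, not enforced here): the
 * sweep relies on a kernel with fault injection support, e.g.
 * CONFIG_FAULT_INJECTION, CONFIG_FAULT_INJECTION_DEBUG_FS, CONFIG_FAILSLAB
 * and CONFIG_FAIL_PAGE_ALLOC. Without the debugfs knobs poked in
 * setup_fault_injection() below, every TEST_FAIL_NTH test will SKIP.
 */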
#include <fcntl.h>
#include <dirent.h>

#define __EXPORTED_HEADERS__
#include <linux/vfio.h>

#include "iommufd_utils.h"

static bool have_fault_injection;

static int writeat(int dfd, const char *fn, const char *val)
{
	size_t val_len = strlen(val);
	ssize_t res;
	int fd;

	fd = openat(dfd, fn, O_WRONLY);
	if (fd == -1)
		return -1;
	res = write(fd, val, val_len);
	assert(res == val_len);
	close(fd);
	return 0;
}

/* Runs before main() and prepares the buffers shared by all tests */
static __attribute__((constructor)) void setup_buffer(void)
{
	PAGE_SIZE = sysconf(_SC_PAGE_SIZE);

	BUFFER_SIZE = 2 * 1024 * 1024;

	buffer = mmap(0, BUFFER_SIZE, PROT_READ | PROT_WRITE,
		      MAP_SHARED | MAP_ANONYMOUS, -1, 0);

	mfd_buffer = memfd_mmap(BUFFER_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
				&mfd);
}
/*
 * This sets up fault injection in a way that is useful for this test.
 * It does not attempt to restore things back to how they were.
 */
static __attribute__((constructor)) void setup_fault_injection(void)
{
	DIR *debugfs = opendir("/sys/kernel/debug/");
	struct dirent *dent;

	if (!debugfs)
		return;

	/* Allow any allocation call to be fault injected */
	if (writeat(dirfd(debugfs), "failslab/ignore-gfp-wait", "N"))
		return;
	writeat(dirfd(debugfs), "fail_page_alloc/ignore-gfp-wait", "N");
	writeat(dirfd(debugfs), "fail_page_alloc/ignore-gfp-highmem", "N");

	while ((dent = readdir(debugfs))) {
		char fn[300];

		if (strncmp(dent->d_name, "fail", 4) != 0)
			continue;

		/* We are looking for kernel splats, quiet down the log */
		snprintf(fn, sizeof(fn), "%s/verbose", dent->d_name);
		writeat(dirfd(debugfs), fn, "0");
	}
	closedir(debugfs);
	have_fault_injection = true;
}
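/*
 * Note on the knobs above: each fail* directory under debugfs is one
 * injection point class (failslab, fail_page_alloc, ...). Writing "N" to
 * ignore-gfp-wait lets allocations that may sleep (GFP_KERNEL) be failed
 * as well, not just atomic ones, and verbose=0 suppresses the
 * per-injection log output that would otherwise bury a real splat.
 */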
struct fail_nth_state {
	int proc_fd;
	unsigned int iteration;
};

static void fail_nth_first(struct __test_metadata *_metadata,
			   struct fail_nth_state *nth_state)
{
	char buf[300];

	/* The main thread's tid equals the pid, so getpid() works here */
	snprintf(buf, sizeof(buf), "/proc/self/task/%u/fail-nth", getpid());
	nth_state->proc_fd = open(buf, O_RDWR);
	ASSERT_NE(-1, nth_state->proc_fd);
}

static bool fail_nth_next(struct __test_metadata *_metadata,
			  struct fail_nth_state *nth_state,
			  int test_result)
{
	static const char disable_nth[] = "0";
	char buf[300];

	/*
	 * This is just an arbitrary limit based on the current kernel
	 * situation. Changes in the kernel can dramatically change the number
	 * of required fault injection sites, so if this hits it doesn't
	 * necessarily mean a test failure, just that the limit has to be made
	 * bigger.
	 */
	ASSERT_GT(400, nth_state->iteration);
	if (nth_state->iteration != 0) {
		ssize_t res;
		ssize_t res2;

		buf[0] = 0;
		/*
		 * Annoyingly, disabling the nth can also fail. This means
		 * the test passed without triggering failure
		 */
		res = pread(nth_state->proc_fd, buf, sizeof(buf), 0);
		if (res == -1 && errno == EFAULT) {
			buf[0] = '1';
			buf[1] = '\n';
			res = 2;
		}

		res2 = pwrite(nth_state->proc_fd, disable_nth,
			      ARRAY_SIZE(disable_nth) - 1, 0);
		if (res2 == -1 && errno == EFAULT) {
			res2 = pwrite(nth_state->proc_fd, disable_nth,
				      ARRAY_SIZE(disable_nth) - 1, 0);
			buf[0] = '1';
			buf[1] = '\n';
		}
		ASSERT_EQ(ARRAY_SIZE(disable_nth) - 1, res2);

		/* printf("  nth %u result=%d nth=%u\n", nth_state->iteration,
		       test_result, atoi(buf)); */
		fflush(stdout);
		ASSERT_LT(1, res);
		if (res != 2 || buf[0] != '0' || buf[1] != '\n')
			return false;
	} else {
		/* printf("  nth %u result=%d\n", nth_state->iteration,
		       test_result); */
	}
	nth_state->iteration++;
	return true;
}
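/*
 * For reference, a minimal sketch of the raw fail-nth protocol the two
 * helpers above drive (illustrative only, hence not compiled;
 * do_syscalls_under_test() is a hypothetical stand-in for the code being
 * swept). Writing N arms the N-th fault-injection-capable call in this
 * task to fail; reading back "0\n" means the fault was consumed, while a
 * positive value means the run completed before reaching the N-th site.
 */
#if 0
static bool probe_nth(unsigned int nth)
{
	char path[64], buf[32];
	ssize_t len;
	int fd;

	snprintf(path, sizeof(path), "/proc/self/task/%u/fail-nth", getpid());
	fd = open(path, O_RDWR);
	assert(fd != -1);

	/* Arm the nth injection site, counted from this write */
	len = snprintf(buf, sizeof(buf), "%u", nth);
	assert(pwrite(fd, buf, len, 0) == len);

	do_syscalls_under_test();	/* hypothetical test body */

	/* "0\n" => the fault fired; otherwise the sweep is complete */
	len = pread(fd, buf, sizeof(buf), 0);
	pwrite(fd, "0", 1, 0);		/* disarm for the next iteration */
	close(fd);
	return len == 2 && buf[0] == '0';
}
#endif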
/*
 * This is called during the test to start failure injection. It allows the
 * test to do some setup before the sweep begins, which is then excluded from
 * the sweep and thus reduces the required iterations.
 */
void __fail_nth_enable(struct __test_metadata *_metadata,
		       struct fail_nth_state *nth_state)
{
	char buf[300];
	size_t len;

	if (!nth_state->iteration)
		return;

	len = snprintf(buf, sizeof(buf), "%u", nth_state->iteration);
	ASSERT_EQ(len, pwrite(nth_state->proc_fd, buf, len, 0));
}
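/*
 * On iteration 0 __fail_nth_enable() is a no-op, so the baseline run
 * executes with no fault armed. On later iterations the Nth injection site
 * is counted from the fail_nth_enable() call, not from the start of the
 * test body.
 */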
#define fail_nth_enable() __fail_nth_enable(_metadata, _nth_state)

#define TEST_FAIL_NTH(fixture_name, name)                                     \
	static int test_nth_##name(struct __test_metadata *_metadata,         \
				   FIXTURE_DATA(fixture_name) *self,          \
				   const FIXTURE_VARIANT(fixture_name)        \
					   *variant,                          \
				   struct fail_nth_state *_nth_state);        \
	TEST_F(fixture_name, name)                                            \
	{                                                                     \
		struct fail_nth_state nth_state = {};                         \
		int test_result = 0;                                          \
									      \
		if (!have_fault_injection)                                    \
			SKIP(return,                                          \
			     "fault injection is not enabled in the kernel"); \
		fail_nth_first(_metadata, &nth_state);                        \
		ASSERT_EQ(0, test_nth_##name(_metadata, self, variant,        \
					     &nth_state));                    \
		while (fail_nth_next(_metadata, &nth_state, test_result)) {   \
			fixture_name##_teardown(_metadata, self, variant);    \
			fixture_name##_setup(_metadata, self, variant);       \
			test_result = test_nth_##name(_metadata, self,        \
						      variant, &nth_state);   \
		}                                                             \
		ASSERT_EQ(0, test_result);                                    \
	}                                                                     \
	static int test_nth_##name(                                           \
		struct __test_metadata __attribute__((unused)) *_metadata,    \
		FIXTURE_DATA(fixture_name) __attribute__((unused)) *self,     \
		const FIXTURE_VARIANT(fixture_name) __attribute__((unused))   \
			*variant,                                             \
		struct fail_nth_state *_nth_state)
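/*
 * Contract for TEST_FAIL_NTH bodies (as written below): return -1 on any
 * failure, which is expected and harmless while a fault is armed, and 0 on
 * a clean pass. The harness requires the baseline run (no fault armed) and
 * the final run of the sweep to return 0.
 */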
FIXTURE(basic_fail_nth)
{
	int fd;
	uint32_t access_id;
	uint32_t stdev_id;
	uint32_t pasid;
};

FIXTURE_SETUP(basic_fail_nth)
{
	self->fd = -1;
	self->access_id = 0;
	self->stdev_id = 0;
	self->pasid = 0; /* tests should use a non-zero value */
}

FIXTURE_TEARDOWN(basic_fail_nth)
{
	int rc;

	if (self->access_id) {
		/* The access FD holds the iommufd open until it closes */
		rc = _test_cmd_destroy_access(self->access_id);
		assert(rc == 0);
	}
	if (self->pasid && self->stdev_id)
		_test_cmd_pasid_detach(self->fd, self->stdev_id, self->pasid);
	teardown_iommufd(self->fd, _metadata);
}
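/*
 * Note: this teardown runs between every sweep iteration, so it must
 * tolerate whatever partially-constructed state a mid-body failure leaves
 * behind; that is why each field is checked before being torn down.
 */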
/* Cover ioas.c */
TEST_FAIL_NTH(basic_fail_nth, basic)
{
	struct iommu_iova_range ranges[10];
	uint32_t ioas_id;
	__u64 iova;

	fail_nth_enable();

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	{
		struct iommu_ioas_iova_ranges ranges_cmd = {
			.size = sizeof(ranges_cmd),
			.num_iovas = ARRAY_SIZE(ranges),
			.ioas_id = ioas_id,
			.allowed_iovas = (uintptr_t)ranges,
		};
		if (ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd))
			return -1;
	}

	{
		struct iommu_ioas_allow_iovas allow_cmd = {
			.size = sizeof(allow_cmd),
			.ioas_id = ioas_id,
			.num_iovas = 1,
			.allowed_iovas = (uintptr_t)ranges,
		};

		ranges[0].start = 16 * 1024;
		ranges[0].last = BUFFER_SIZE + 16 * 1024 * 600 - 1;
		if (ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd))
			return -1;
	}

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	{
		struct iommu_ioas_copy copy_cmd = {
			.size = sizeof(copy_cmd),
			.flags = IOMMU_IOAS_MAP_WRITEABLE |
				 IOMMU_IOAS_MAP_READABLE,
			.dst_ioas_id = ioas_id,
			.src_ioas_id = ioas_id,
			.src_iova = iova,
			.length = sizeof(ranges),
		};

		if (ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd))
			return -1;
	}

	if (_test_ioctl_ioas_unmap(self->fd, ioas_id, iova, BUFFER_SIZE,
				   NULL))
		return -1;
	/* Failure path of no IOVA to unmap */
	_test_ioctl_ioas_unmap(self->fd, ioas_id, iova, BUFFER_SIZE, NULL);
	return 0;
}
/* iopt_area_fill_domains() and iopt_area_fill_domain() */
TEST_FAIL_NTH(basic_fail_nth, map_domain)
{
	uint32_t ioas_id;
	__u32 stdev_id;
	__u32 hwpt_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	fail_nth_enable();

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	if (_test_ioctl_destroy(self->fd, stdev_id))
		return -1;

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
		return -1;
	return 0;
}

/* iopt_area_fill_domains() and iopt_area_fill_domain() */
TEST_FAIL_NTH(basic_fail_nth, map_file_domain)
{
	uint32_t ioas_id;
	__u32 stdev_id;
	__u32 hwpt_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	fail_nth_enable();

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
		return -1;

	if (_test_ioctl_ioas_map_file(self->fd, ioas_id, mfd, 0, 262144, &iova,
				      IOMMU_IOAS_MAP_WRITEABLE |
					      IOMMU_IOAS_MAP_READABLE))
		return -1;

	if (_test_ioctl_destroy(self->fd, stdev_id))
		return -1;

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
		return -1;
	return 0;
}
TEST_FAIL_NTH(basic_fail_nth, map_two_domains)
{
	uint32_t ioas_id;
	__u32 stdev_id2;
	__u32 stdev_id;
	__u32 hwpt_id2;
	__u32 hwpt_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
		return -1;

	fail_nth_enable();

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id2, &hwpt_id2,
				  NULL))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	if (_test_ioctl_destroy(self->fd, stdev_id))
		return -1;

	if (_test_ioctl_destroy(self->fd, stdev_id2))
		return -1;

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
		return -1;
	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id2, &hwpt_id2,
				  NULL))
		return -1;
	return 0;
}
TEST_FAIL_NTH(basic_fail_nth, access_rw)
{
	uint64_t tmp_big[4096];
	uint32_t ioas_id;
	uint16_t tmp[32];
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	fail_nth_enable();

	if (_test_cmd_create_access(self->fd, ioas_id, &self->access_id, 0))
		return -1;

	{
		struct iommu_test_cmd access_cmd = {
			.size = sizeof(access_cmd),
			.op = IOMMU_TEST_OP_ACCESS_RW,
			.id = self->access_id,
			.access_rw = { .iova = iova,
				       .length = sizeof(tmp),
				       .uptr = (uintptr_t)tmp },
		};

		/* Read (no flags set) */
		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;

		access_cmd.access_rw.flags = MOCK_ACCESS_RW_WRITE;
		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;

		access_cmd.access_rw.flags = MOCK_ACCESS_RW_SLOW_PATH;
		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;
		access_cmd.access_rw.flags = MOCK_ACCESS_RW_SLOW_PATH |
					     MOCK_ACCESS_RW_WRITE;
		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;
	}

	{
		struct iommu_test_cmd access_cmd = {
			.size = sizeof(access_cmd),
			.op = IOMMU_TEST_OP_ACCESS_RW,
			.id = self->access_id,
			.access_rw = { .iova = iova,
				       .flags = MOCK_ACCESS_RW_SLOW_PATH,
				       .length = sizeof(tmp_big),
				       .uptr = (uintptr_t)tmp_big },
		};

		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;
	}
	if (_test_cmd_destroy_access(self->access_id))
		return -1;
	self->access_id = 0;
	return 0;
}
/* pages.c access functions */
TEST_FAIL_NTH(basic_fail_nth, access_pin)
{
	uint32_t access_pages_id;
	uint32_t ioas_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	if (_test_cmd_create_access(self->fd, ioas_id, &self->access_id,
				    MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES))
		return -1;

	fail_nth_enable();

	{
		struct iommu_test_cmd access_cmd = {
			.size = sizeof(access_cmd),
			.op = IOMMU_TEST_OP_ACCESS_PAGES,
			.id = self->access_id,
			.access_pages = { .iova = iova,
					  .length = BUFFER_SIZE,
					  .uptr = (uintptr_t)buffer },
		};

		if (ioctl(self->fd,
			  _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			  &access_cmd))
			return -1;
		access_pages_id = access_cmd.access_pages.out_access_pages_id;
	}

	if (_test_cmd_destroy_access_pages(self->fd, self->access_id,
					   access_pages_id))
		return -1;

	if (_test_cmd_destroy_access(self->access_id))
		return -1;
	self->access_id = 0;
	return 0;
}
/* iopt_pages_fill_xarray() */
TEST_FAIL_NTH(basic_fail_nth, access_pin_domain)
{
	uint32_t access_pages_id;
	uint32_t ioas_id;
	__u32 stdev_id;
	__u32 hwpt_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	if (_test_cmd_create_access(self->fd, ioas_id, &self->access_id,
				    MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES))
		return -1;

	fail_nth_enable();

	{
		struct iommu_test_cmd access_cmd = {
			.size = sizeof(access_cmd),
			.op = IOMMU_TEST_OP_ACCESS_PAGES,
			.id = self->access_id,
			.access_pages = { .iova = iova,
					  .length = BUFFER_SIZE,
					  .uptr = (uintptr_t)buffer },
		};

		if (ioctl(self->fd,
			  _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			  &access_cmd))
			return -1;
		access_pages_id = access_cmd.access_pages.out_access_pages_id;
	}

	if (_test_cmd_destroy_access_pages(self->fd, self->access_id,
					   access_pages_id))
		return -1;

	if (_test_cmd_destroy_access(self->access_id))
		return -1;
	self->access_id = 0;

	if (_test_ioctl_destroy(self->fd, stdev_id))
		return -1;
	return 0;
}
/* device.c */
TEST_FAIL_NTH(basic_fail_nth, device)
{
	struct iommu_hwpt_selftest data = {
		.iotlb = IOMMU_TEST_IOTLB_DEFAULT,
	};
	struct iommu_test_hw_info info;
	uint32_t fault_id, fault_fd;
	uint32_t veventq_id, veventq_fd;
	uint32_t fault_hwpt_id;
	uint32_t test_hwpt_id;
	uint32_t ioas_id;
	uint32_t ioas_id2;
	uint32_t idev_id;
	uint32_t hwpt_id;
	uint32_t viommu_id;
	uint32_t hw_queue_id;
	uint32_t vdev_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id2))
		return -1;

	iova = MOCK_APERTURE_START;
	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, PAGE_SIZE, &iova,
				 IOMMU_IOAS_MAP_FIXED_IOVA |
					 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;
	if (_test_ioctl_ioas_map(self->fd, ioas_id2, buffer, PAGE_SIZE, &iova,
				 IOMMU_IOAS_MAP_FIXED_IOVA |
					 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	fail_nth_enable();

	if (_test_cmd_mock_domain_flags(self->fd, ioas_id,
					MOCK_FLAGS_DEVICE_PASID,
					&self->stdev_id, NULL, &idev_id))
		return -1;

	if (_test_cmd_get_hw_info(self->fd, idev_id, IOMMU_HW_INFO_TYPE_DEFAULT,
				  &info, sizeof(info), NULL, NULL))
		return -1;

	if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, 0,
				 IOMMU_HWPT_ALLOC_PASID, &hwpt_id,
				 IOMMU_HWPT_DATA_NONE, 0, 0))
		return -1;

	if (_test_cmd_mock_domain_replace(self->fd, self->stdev_id, ioas_id2,
					  NULL))
		return -1;

	if (_test_cmd_mock_domain_replace(self->fd, self->stdev_id, hwpt_id,
					  NULL))
		return -1;

	if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, 0,
				 IOMMU_HWPT_ALLOC_NEST_PARENT |
					 IOMMU_HWPT_ALLOC_PASID,
				 &hwpt_id,
				 IOMMU_HWPT_DATA_NONE, 0, 0))
		return -1;

	if (_test_cmd_viommu_alloc(self->fd, idev_id, hwpt_id, 0,
				   IOMMU_VIOMMU_TYPE_SELFTEST, NULL, 0,
				   &viommu_id))
		return -1;

	if (_test_cmd_vdevice_alloc(self->fd, viommu_id, idev_id, 0, &vdev_id))
		return -1;

	if (_test_cmd_hw_queue_alloc(self->fd, viommu_id,
				     IOMMU_HW_QUEUE_TYPE_SELFTEST, 0, iova,
				     PAGE_SIZE, &hw_queue_id))
		return -1;

	if (_test_ioctl_fault_alloc(self->fd, &fault_id, &fault_fd))
		return -1;
	close(fault_fd);

	if (_test_cmd_hwpt_alloc(self->fd, idev_id, hwpt_id, fault_id,
				 IOMMU_HWPT_FAULT_ID_VALID, &fault_hwpt_id,
				 IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data)))
		return -1;

	if (_test_cmd_veventq_alloc(self->fd, viommu_id,
				    IOMMU_VEVENTQ_TYPE_SELFTEST, &veventq_id,
				    &veventq_fd))
		return -1;
	close(veventq_fd);

	if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, 0,
				 IOMMU_HWPT_ALLOC_PASID,
				 &test_hwpt_id,
				 IOMMU_HWPT_DATA_NONE, 0, 0))
		return -1;

	/* Tests for pasid attach/replace/detach */

	self->pasid = 200;

	if (_test_cmd_pasid_attach(self->fd, self->stdev_id,
				   self->pasid, hwpt_id)) {
		self->pasid = 0;
		return -1;
	}

	if (_test_cmd_pasid_replace(self->fd, self->stdev_id,
				    self->pasid, test_hwpt_id))
		return -1;

	if (_test_cmd_pasid_detach(self->fd, self->stdev_id, self->pasid))
		return -1;

	self->pasid = 0;

	return 0;
}

TEST_HARNESS_MAIN