Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/tools/testing/selftests/iommu/iommufd_utils.h
51892 views
1
/* SPDX-License-Identifier: GPL-2.0-only */
2
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */
3
#ifndef __SELFTEST_IOMMUFD_UTILS
4
#define __SELFTEST_IOMMUFD_UTILS
5
6
#include <unistd.h>
7
#include <stddef.h>
8
#include <sys/fcntl.h>
9
#include <sys/ioctl.h>
10
#include <stdint.h>
11
#include <assert.h>
12
#include <poll.h>
13
14
#include "kselftest_harness.h"
15
#include "../../../../drivers/iommu/iommufd/iommufd_test.h"
16
17
/* Hack to make assertions more readable */
#define _IOMMU_TEST_CMD(x) IOMMU_TEST_CMD

/* Imported from include/asm-generic/bitops/generic-non-atomic.h */
#define BITS_PER_BYTE 8
#define BITS_PER_LONG __BITS_PER_LONG
/* Mask selecting bit @nr within one unsigned long word */
#define BIT_MASK(nr) (1UL << ((nr) % __BITS_PER_LONG))
/* Index of the unsigned long word that holds bit @nr */
#define BIT_WORD(nr) ((nr) / __BITS_PER_LONG)
25
26
/*
 * Page accounting modes passed to the mock-domain test interface.
 * NOTE(review): values presumably mirror the kernel-side iopt_pages
 * accounting enum in drivers/iommu/iommufd -- confirm there.
 */
enum {
	IOPT_PAGES_ACCOUNT_NONE = 0,
	IOPT_PAGES_ACCOUNT_USER = 1,
	IOPT_PAGES_ACCOUNT_MM = 2,
};

/* Round @n up to the next multiple of @d (integer arithmetic only) */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
33
34
/* Non-atomically set bit @nr in the bitmap starting at @addr. */
static inline void set_bit(unsigned int nr, unsigned long *addr)
{
	addr[BIT_WORD(nr)] |= BIT_MASK(nr);
}
41
42
static inline bool test_bit(unsigned int nr, unsigned long *addr)
43
{
44
return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1)));
45
}
46
47
/* Shared anonymous test buffer and its size; NOTE(review): presumably
 * populated by the fixture setup in the including test -- confirm there. */
static void *buffer;
static unsigned long BUFFER_SIZE;

/* memfd-backed buffer and its file descriptor (see memfd_mmap()) */
static void *mfd_buffer;
static int mfd;

/* Runtime page size -- a variable here, not the usual compile-time macro */
static unsigned long PAGE_SIZE;

/* sizeof() of a single struct member */
#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
/* Offset of the first byte past MEMBER within TYPE */
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
58
59
/* Expect mmap() of self->fd at @offset/@length to fail with errno @_errno */
#define test_err_mmap(_errno, length, offset) \
	EXPECT_ERRNO(_errno, (long)mmap(NULL, length, PROT_READ | PROT_WRITE, \
					MAP_SHARED, self->fd, offset))
62
63
static inline void *memfd_mmap(size_t length, int prot, int flags, int *mfd_p)
64
{
65
int mfd_flags = (flags & MAP_HUGETLB) ? MFD_HUGETLB : 0;
66
int mfd = memfd_create("buffer", mfd_flags);
67
void *buf = MAP_FAILED;
68
69
if (mfd <= 0)
70
return MAP_FAILED;
71
if (ftruncate(mfd, length))
72
goto out;
73
*mfd_p = mfd;
74
buf = mmap(0, length, prot, flags, mfd, 0);
75
out:
76
if (buf == MAP_FAILED)
77
close(mfd);
78
return buf;
79
}
80
81
/*
 * Have the kernel check the refcount on pages. I don't know why a freshly
 * mmap'd anon non-compound page starts out with a ref of 3
 */
/* Assert the kernel sees exactly @_refs references on each page of the
 * @_length bytes at @_ptr. */
#define check_refs(_ptr, _length, _refs)                                      \
	({                                                                    \
		struct iommu_test_cmd test_cmd = {                            \
			.size = sizeof(test_cmd),                             \
			.op = IOMMU_TEST_OP_MD_CHECK_REFS,                    \
			.check_refs = { .length = _length,                    \
					.uptr = (uintptr_t)(_ptr),            \
					.refs = _refs },                      \
		};                                                            \
		ASSERT_EQ(0,                                                  \
			  ioctl(self->fd,                                     \
				_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_REFS), \
				&test_cmd));                                  \
	})
99
100
/*
 * Create a mock domain + device attached to IOAS @ioas_id.
 * Each out parameter (@stdev_id, @hwpt_id, @idev_id) is optional and may
 * be NULL. Returns 0 on success or the raw ioctl() result on failure.
 */
static int _test_cmd_mock_domain(int fd, unsigned int ioas_id, __u32 *stdev_id,
				 __u32 *hwpt_id, __u32 *idev_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_MOCK_DOMAIN,
		.id = ioas_id,
		.mock_domain = {},
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret)
		return ret;
	if (stdev_id)
		*stdev_id = cmd.mock_domain.out_stdev_id;
	/* The kernel must never hand back object ID 0 */
	assert(cmd.id != 0);
	if (hwpt_id)
		*hwpt_id = cmd.mock_domain.out_hwpt_id;
	if (idev_id)
		*idev_id = cmd.mock_domain.out_idev_id;
	return 0;
}
/* Assert-success wrapper; out pointers may be NULL */
#define test_cmd_mock_domain(ioas_id, stdev_id, hwpt_id, idev_id)         \
	ASSERT_EQ(0, _test_cmd_mock_domain(self->fd, ioas_id, stdev_id,   \
					   hwpt_id, idev_id))
/* Expect mock domain creation to fail with errno @_errno */
#define test_err_mock_domain(_errno, ioas_id, stdev_id, hwpt_id)          \
	EXPECT_ERRNO(_errno, _test_cmd_mock_domain(self->fd, ioas_id,     \
						   stdev_id, hwpt_id, NULL))
129
130
/*
 * Like _test_cmd_mock_domain() but passes @stdev_flags as the mock
 * device's flags. Out parameters are optional. Returns 0 or the raw
 * ioctl() result.
 */
static int _test_cmd_mock_domain_flags(int fd, unsigned int ioas_id,
				       __u32 stdev_flags, __u32 *stdev_id,
				       __u32 *hwpt_id, __u32 *idev_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS,
		.id = ioas_id,
		.mock_domain_flags = { .dev_flags = stdev_flags },
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret)
		return ret;
	if (stdev_id)
		*stdev_id = cmd.mock_domain_flags.out_stdev_id;
	/* The kernel must never hand back object ID 0 */
	assert(cmd.id != 0);
	if (hwpt_id)
		*hwpt_id = cmd.mock_domain_flags.out_hwpt_id;
	if (idev_id)
		*idev_id = cmd.mock_domain_flags.out_idev_id;
	return 0;
}
/* Assert-success wrapper around _test_cmd_mock_domain_flags() */
#define test_cmd_mock_domain_flags(ioas_id, flags, stdev_id, hwpt_id, idev_id) \
	ASSERT_EQ(0, _test_cmd_mock_domain_flags(self->fd, ioas_id, flags,     \
						 stdev_id, hwpt_id, idev_id))
/* Expect flagged mock domain creation to fail with errno @_errno */
#define test_err_mock_domain_flags(_errno, ioas_id, flags, stdev_id, hwpt_id) \
	EXPECT_ERRNO(_errno,                                                  \
		     _test_cmd_mock_domain_flags(self->fd, ioas_id, flags,    \
						 stdev_id, hwpt_id, NULL))
161
162
/*
 * Replace the page table that mock device @stdev_id is attached to with
 * @pt_id. If @hwpt_id is non-NULL it receives the previously attached
 * page table ID (the kernel swaps it into .pt_id). Returns 0 or the raw
 * ioctl() result.
 */
static int _test_cmd_mock_domain_replace(int fd, __u32 stdev_id, __u32 pt_id,
					 __u32 *hwpt_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE,
		.id = stdev_id,
		.mock_domain_replace = {
			.pt_id = pt_id,
		},
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret)
		return ret;
	if (hwpt_id)
		*hwpt_id = cmd.mock_domain_replace.pt_id;
	return 0;
}

/* Assert-success wrapper; discards the replaced page table ID */
#define test_cmd_mock_domain_replace(stdev_id, pt_id)                         \
	ASSERT_EQ(0, _test_cmd_mock_domain_replace(self->fd, stdev_id, pt_id, \
						   NULL))
/* Expect the replace to fail with errno @_errno */
#define test_err_mock_domain_replace(_errno, stdev_id, pt_id)                  \
	EXPECT_ERRNO(_errno, _test_cmd_mock_domain_replace(self->fd, stdev_id, \
							   pt_id, NULL))
189
190
/*
 * Allocate a hardware page table for @device_id on top of @pt_id.
 * @ft_id optionally attaches a fault queue (0 for none); @data/@data_len
 * carry driver-specific configuration of @data_type. If @hwpt_id is
 * non-NULL it receives the new object's ID. Returns 0 or the raw ioctl()
 * result.
 */
static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id, __u32 ft_id,
				__u32 flags, __u32 *hwpt_id, __u32 data_type,
				void *data, size_t data_len)
{
	struct iommu_hwpt_alloc cmd = {
		.size = sizeof(cmd),
		.flags = flags,
		.dev_id = device_id,
		.pt_id = pt_id,
		.data_type = data_type,
		.data_len = data_len,
		.data_uptr = (uint64_t)data,
		.fault_id = ft_id,
	};
	int ret;

	ret = ioctl(fd, IOMMU_HWPT_ALLOC, &cmd);
	if (ret)
		return ret;
	if (hwpt_id)
		*hwpt_id = cmd.out_hwpt_id;
	return 0;
}
213
214
/* Allocate a plain HWPT (no driver data, no fault queue), assert success */
#define test_cmd_hwpt_alloc(device_id, pt_id, flags, hwpt_id)                   \
	ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags, \
					  hwpt_id, IOMMU_HWPT_DATA_NONE, NULL,  \
					  0))
/* Allocate a HWPT backed by selftest page table type @iommupt_type */
#define test_cmd_hwpt_alloc_iommupt(device_id, pt_id, flags, iommupt_type, \
				    hwpt_id)                               \
	({                                                                 \
		struct iommu_hwpt_selftest user_cfg = {                    \
			.pagetable_type = iommupt_type                     \
		};                                                         \
									   \
		ASSERT_EQ(0, _test_cmd_hwpt_alloc(                         \
				     self->fd, device_id, pt_id, 0, flags, \
				     hwpt_id, IOMMU_HWPT_DATA_SELFTEST,    \
				     &user_cfg, sizeof(user_cfg)));        \
	})
/* Expect plain HWPT allocation to fail with errno @_errno */
#define test_err_hwpt_alloc(_errno, device_id, pt_id, flags, hwpt_id)      \
	EXPECT_ERRNO(_errno, _test_cmd_hwpt_alloc(                         \
				     self->fd, device_id, pt_id, 0, flags, \
				     hwpt_id, IOMMU_HWPT_DATA_NONE, NULL, 0))

/* Allocate a nested HWPT carrying driver data @data of type @data_type */
#define test_cmd_hwpt_alloc_nested(device_id, pt_id, flags, hwpt_id,            \
				   data_type, data, data_len)                   \
	ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags, \
					  hwpt_id, data_type, data, data_len))
/* Expect nested HWPT allocation to fail with errno @_errno */
#define test_err_hwpt_alloc_nested(_errno, device_id, pt_id, flags, hwpt_id,    \
				   data_type, data, data_len)                   \
	EXPECT_ERRNO(_errno,                                                    \
		     _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags, \
					  hwpt_id, data_type, data, data_len))

/* Like test_cmd_hwpt_alloc_nested() but also attaches fault queue @fault_id */
#define test_cmd_hwpt_alloc_iopf(device_id, pt_id, fault_id, flags, hwpt_id,    \
				 data_type, data, data_len)                     \
	ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, fault_id, \
					  flags, hwpt_id, data_type, data,      \
					  data_len))
/* Expect fault-queue-attached HWPT allocation to fail with errno @_errno */
#define test_err_hwpt_alloc_iopf(_errno, device_id, pt_id, fault_id, flags,    \
				 hwpt_id, data_type, data, data_len)           \
	EXPECT_ERRNO(_errno,                                                   \
		     _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, fault_id,\
					  flags, hwpt_id, data_type, data,     \
					  data_len))
256
257
/* Assert that IOTLB slot @iotlb_id of nested HWPT @hwpt_id holds @expected */
#define test_cmd_hwpt_check_iotlb(hwpt_id, iotlb_id, expected)                 \
	({                                                                     \
		struct iommu_test_cmd test_cmd = {                             \
			.size = sizeof(test_cmd),                              \
			.op = IOMMU_TEST_OP_MD_CHECK_IOTLB,                    \
			.id = hwpt_id,                                         \
			.check_iotlb = {                                       \
				.id = iotlb_id,                                \
				.iotlb = expected,                             \
			},                                                     \
		};                                                             \
		ASSERT_EQ(0,                                                   \
			  ioctl(self->fd,                                      \
				_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_IOTLB), \
				&test_cmd));                                   \
	})

/* Assert every IOTLB slot of @hwpt_id holds @expected */
#define test_cmd_hwpt_check_iotlb_all(hwpt_id, expected)                 \
	({                                                               \
		int i;                                                   \
		for (i = 0; i < MOCK_NESTED_DOMAIN_IOTLB_NUM; i++)       \
			test_cmd_hwpt_check_iotlb(hwpt_id, i, expected); \
	})
280
281
/* Assert that cache slot @cache_id of mock device @device_id holds @expected */
#define test_cmd_dev_check_cache(device_id, cache_id, expected)          \
	({                                                               \
		struct iommu_test_cmd test_cmd = {                       \
			.size = sizeof(test_cmd),                        \
			.op = IOMMU_TEST_OP_DEV_CHECK_CACHE,             \
			.id = device_id,                                 \
			.check_dev_cache = {                             \
				.id = cache_id,                          \
				.cache = expected,                       \
			},                                               \
		};                                                       \
		ASSERT_EQ(0, ioctl(self->fd,                             \
				   _IOMMU_TEST_CMD(                      \
					   IOMMU_TEST_OP_DEV_CHECK_CACHE), \
				   &test_cmd));                          \
	})

/* Assert every cache slot of @device_id holds @expected */
#define test_cmd_dev_check_cache_all(device_id, expected)                 \
	({                                                                \
		int c;                                                    \
		for (c = 0; c < MOCK_DEV_CACHE_NUM; c++)                  \
			test_cmd_dev_check_cache(device_id, c, expected); \
	})
304
305
/*
 * Issue IOMMU_HWPT_INVALIDATE against @hwpt_id with an array of *@nreqs
 * requests at @reqs, each @lreq bytes of format @data_type. On return
 * *@nreqs holds the number of entries the kernel consumed (updated even
 * on failure). Returns the raw ioctl() result.
 */
static int _test_cmd_hwpt_invalidate(int fd, __u32 hwpt_id, void *reqs,
				     uint32_t data_type, uint32_t lreq,
				     uint32_t *nreqs)
{
	struct iommu_hwpt_invalidate cmd = {
		.size = sizeof(cmd),
		.hwpt_id = hwpt_id,
		.data_type = data_type,
		.data_uptr = (uint64_t)reqs,
		.entry_len = lreq,
		.entry_num = *nreqs,
	};
	int rc = ioctl(fd, IOMMU_HWPT_INVALIDATE, &cmd);
	*nreqs = cmd.entry_num;
	return rc;
}

/* Assert-success wrapper around _test_cmd_hwpt_invalidate() */
#define test_cmd_hwpt_invalidate(hwpt_id, reqs, data_type, lreq, nreqs)    \
	({                                                                 \
		ASSERT_EQ(0,                                               \
			  _test_cmd_hwpt_invalidate(self->fd, hwpt_id, reqs, \
						    data_type, lreq, nreqs)); \
	})
/* Expect invalidation to fail with errno @_errno */
#define test_err_hwpt_invalidate(_errno, hwpt_id, reqs, data_type, lreq, \
				 nreqs)                                  \
	({                                                               \
		EXPECT_ERRNO(_errno, _test_cmd_hwpt_invalidate(          \
					     self->fd, hwpt_id, reqs,    \
					     data_type, lreq, nreqs));   \
	})
335
336
/*
 * Invalidate through a vIOMMU object. Reuses IOMMU_HWPT_INVALIDATE with
 * @viommu_id in the hwpt_id slot (the kernel accepts either object kind).
 * Same in/out contract as _test_cmd_hwpt_invalidate().
 */
static int _test_cmd_viommu_invalidate(int fd, __u32 viommu_id, void *reqs,
				       uint32_t data_type, uint32_t lreq,
				       uint32_t *nreqs)
{
	struct iommu_hwpt_invalidate cmd = {
		.size = sizeof(cmd),
		.hwpt_id = viommu_id,
		.data_type = data_type,
		.data_uptr = (uint64_t)reqs,
		.entry_len = lreq,
		.entry_num = *nreqs,
	};
	int rc = ioctl(fd, IOMMU_HWPT_INVALIDATE, &cmd);
	*nreqs = cmd.entry_num;
	return rc;
}

/* Assert-success wrapper using the selftest invalidation data format */
#define test_cmd_viommu_invalidate(viommu, reqs, lreq, nreqs)                  \
	({                                                                     \
		ASSERT_EQ(0,                                                   \
			  _test_cmd_viommu_invalidate(self->fd, viommu, reqs,  \
					IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST, \
						      lreq, nreqs));           \
	})
/* Expect vIOMMU invalidation to fail with errno @_errno */
#define test_err_viommu_invalidate(_errno, viommu_id, reqs, data_type, lreq, \
				   nreqs)                                    \
	({                                                                   \
		EXPECT_ERRNO(_errno, _test_cmd_viommu_invalidate(            \
					     self->fd, viommu_id, reqs,      \
					     data_type, lreq, nreqs));       \
	})
367
368
/*
 * Point the access object @access_id at a different IOAS @ioas_id.
 * Returns the raw ioctl() result (0 on success).
 */
static int _test_cmd_access_replace_ioas(int fd, __u32 access_id,
					 unsigned int ioas_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_ACCESS_REPLACE_IOAS,
		.id = access_id,
		.access_replace_ioas = { .ioas_id = ioas_id },
	};

	return ioctl(fd, IOMMU_TEST_CMD, &cmd);
}
/* Assert-success wrapper around _test_cmd_access_replace_ioas() */
#define test_cmd_access_replace_ioas(access_id, ioas_id) \
	ASSERT_EQ(0, _test_cmd_access_replace_ioas(self->fd, access_id, ioas_id))
386
387
/*
 * Enable or disable dirty tracking on HWPT @hwpt_id.
 * Returns 0 on success or -errno on failure (note: unlike most helpers
 * here, this converts the ioctl failure to a negative errno).
 */
static int _test_cmd_set_dirty_tracking(int fd, __u32 hwpt_id, bool enabled)
{
	struct iommu_hwpt_set_dirty_tracking cmd = {
		.size = sizeof(cmd),
		.flags = enabled ? IOMMU_HWPT_DIRTY_TRACKING_ENABLE : 0,
		.hwpt_id = hwpt_id,
	};
	int ret;

	ret = ioctl(fd, IOMMU_HWPT_SET_DIRTY_TRACKING, &cmd);
	if (ret)
		return -errno;
	return 0;
}
/* Assert-success wrapper around _test_cmd_set_dirty_tracking() */
#define test_cmd_set_dirty_tracking(hwpt_id, enabled) \
	ASSERT_EQ(0, _test_cmd_set_dirty_tracking(self->fd, hwpt_id, enabled))
403
404
/*
 * Read the dirty bitmap for [@iova, @iova + @length) of HWPT @hwpt_id
 * into @bitmap, one bit per @page_size page. @flags can request e.g.
 * no-clear behaviour. Returns 0 or the raw ioctl() result.
 */
static int _test_cmd_get_dirty_bitmap(int fd, __u32 hwpt_id, size_t length,
				      __u64 iova, size_t page_size,
				      __u64 *bitmap, __u32 flags)
{
	struct iommu_hwpt_get_dirty_bitmap cmd = {
		.size = sizeof(cmd),
		.hwpt_id = hwpt_id,
		.flags = flags,
		.iova = iova,
		.length = length,
		.page_size = page_size,
		.data = (uintptr_t)bitmap,
	};
	int ret;

	ret = ioctl(fd, IOMMU_HWPT_GET_DIRTY_BITMAP, &cmd);
	if (ret)
		return ret;
	return 0;
}

/* Assert-success wrapper; note this one takes @fd explicitly */
#define test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size,     \
				  bitmap, flags)                            \
	ASSERT_EQ(0, _test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova,  \
						page_size, bitmap, flags))
429
430
/*
 * Tell the mock domain to mark as dirty the pages whose bits are set in
 * @bitmap, for the range [@iova, @iova + @length). If @dirty is non-NULL
 * it receives the number of pages the kernel actually dirtied. Returns 0
 * on success.
 */
static int _test_cmd_mock_domain_set_dirty(int fd, __u32 hwpt_id, size_t length,
					   __u64 iova, size_t page_size,
					   __u64 *bitmap, __u64 *dirty)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_DIRTY,
		.id = hwpt_id,
		.dirty = {
			.iova = iova,
			.length = length,
			.page_size = page_size,
			.uptr = (uintptr_t)bitmap,
		}
	};
	int ret;

	ret = ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_DIRTY), &cmd);
	if (ret)
		/* NOTE(review): negates ioctl()'s -1, yielding +1 -- unlike
		 * sibling helpers that return the raw result; callers only
		 * test for non-zero so behaviour is unaffected. Confirm
		 * intent before changing. */
		return -ret;
	if (dirty)
		*dirty = cmd.dirty.out_nr_dirty;
	return 0;
}

/* Assert-success wrapper; note this one takes @fd explicitly */
#define test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size, \
				       bitmap, nr)                           \
	ASSERT_EQ(0,                                                         \
		  _test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, \
						  page_size, bitmap, nr))
460
461
/*
 * End-to-end dirty tracking exercise: dirty every @pteset-th page in the
 * mock domain, read the bitmap back twice and check (a) the expected
 * pattern of dirty bits, and (b) that the second read sees zeroes unless
 * IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR was requested.
 *
 * @page_size is the bitmap granule; @pte_page_size is the (larger) IOPTE
 * size, so one dirtied IOPTE shows up as @npte consecutive set bits.
 */
static int _test_mock_dirty_bitmaps(int fd, __u32 hwpt_id, size_t length,
				    __u64 iova, size_t page_size,
				    size_t pte_page_size, __u64 *bitmap,
				    __u64 nbits, __u32 flags,
				    struct __test_metadata *_metadata)
{
	unsigned long npte = pte_page_size / page_size, pteset = 2 * npte;
	/* GNU ?: -- at least one set even when nbits < pteset */
	unsigned long j, i, nr = nbits / pteset ?: 1;
	unsigned long bitmap_size = DIV_ROUND_UP(nbits, BITS_PER_BYTE);
	__u64 out_dirty = 0;

	/* Mark all even bits as dirty in the mock domain */
	memset(bitmap, 0, bitmap_size);
	for (i = 0; i < nbits; i += pteset)
		set_bit(i, (unsigned long *)bitmap);

	test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size,
				       bitmap, &out_dirty);
	ASSERT_EQ(nr, out_dirty);

	/* Expect all even bits as dirty in the user bitmap */
	memset(bitmap, 0, bitmap_size);
	test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap,
				  flags);
	/* Beware ASSERT_EQ() is two statements -- braces are not redundant! */
	for (i = 0; i < nbits; i += pteset) {
		for (j = 0; j < pteset; j++) {
			ASSERT_EQ(j < npte,
				  test_bit(i + j, (unsigned long *)bitmap));
		}
		ASSERT_EQ(!(i % pteset), test_bit(i, (unsigned long *)bitmap));
	}

	memset(bitmap, 0, bitmap_size);
	test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap,
				  flags);

	/* It as read already -- expect all zeroes */
	for (i = 0; i < nbits; i += pteset) {
		for (j = 0; j < pteset; j++) {
			ASSERT_EQ(
				(j < npte) &&
					(flags &
					 IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR),
				test_bit(i + j, (unsigned long *)bitmap));
		}
	}

	return 0;
}
/* Assert-success wrapper around _test_mock_dirty_bitmaps() */
#define test_mock_dirty_bitmaps(hwpt_id, length, iova, page_size, pte_size,\
				bitmap, bitmap_size, flags, _metadata)     \
	ASSERT_EQ(0, _test_mock_dirty_bitmaps(self->fd, hwpt_id, length, iova, \
					      page_size, pte_size, bitmap,     \
					      bitmap_size, flags, _metadata))
516
517
/*
 * Create an access object bound to @ioas_id. Despite the name, *@access_id
 * receives a file descriptor (out_access_fd) -- destroying an access means
 * closing it. Returns 0 or the raw ioctl() result.
 */
static int _test_cmd_create_access(int fd, unsigned int ioas_id,
				   __u32 *access_id, unsigned int flags)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_CREATE_ACCESS,
		.id = ioas_id,
		.create_access = { .flags = flags },
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret)
		return ret;
	*access_id = cmd.create_access.out_access_fd;
	return 0;
}
/* Assert-success wrapper around _test_cmd_create_access() */
#define test_cmd_create_access(ioas_id, access_id, flags)                  \
	ASSERT_EQ(0, _test_cmd_create_access(self->fd, ioas_id, access_id, \
					     flags))
537
538
/* An access "id" is really a file descriptor; destroying it is closing it */
static int _test_cmd_destroy_access(unsigned int access_id)
{
	return close(access_id);
}
/* Assert-success wrapper around _test_cmd_destroy_access() */
#define test_cmd_destroy_access(access_id) \
	ASSERT_EQ(0, _test_cmd_destroy_access(access_id))
544
545
/*
 * Drop the pinned pages @access_pages_id from access object @access_id.
 * Returns the raw ioctl() result.
 */
static int _test_cmd_destroy_access_pages(int fd, unsigned int access_id,
					  unsigned int access_pages_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_DESTROY_ACCESS_PAGES,
		.id = access_id,
		.destroy_access_pages = { .access_pages_id = access_pages_id },
	};
	return ioctl(fd, IOMMU_TEST_CMD, &cmd);
}
/* Assert-success wrapper around _test_cmd_destroy_access_pages() */
#define test_cmd_destroy_access_pages(access_id, access_pages_id)         \
	ASSERT_EQ(0, _test_cmd_destroy_access_pages(self->fd, access_id,  \
						    access_pages_id))
/* Expect the destroy to fail with errno @_errno */
#define test_err_destroy_access_pages(_errno, access_id, access_pages_id) \
	EXPECT_ERRNO(_errno, _test_cmd_destroy_access_pages(              \
				     self->fd, access_id, access_pages_id))
562
563
/*
 * Ask the selftest driver for a dmabuf of @len bytes. On success the
 * ioctl returns the new dmabuf fd directly, stored in *@out_fd; returns
 * 0 on success, -1 on failure.
 */
static int _test_cmd_get_dmabuf(int fd, size_t len, int *out_fd)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_DMABUF_GET,
		.dmabuf_get = { .length = len, .open_flags = O_CLOEXEC },
	};

	*out_fd = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (*out_fd < 0)
		return -1;
	return 0;
}
/* Assert-success wrapper around _test_cmd_get_dmabuf() */
#define test_cmd_get_dmabuf(len, out_fd) \
	ASSERT_EQ(0, _test_cmd_get_dmabuf(self->fd, len, out_fd))
578
579
/*
 * Toggle revocation of the selftest dmabuf @dmabuf_fd.
 * Returns 0 on success, -1 on failure.
 */
static int _test_cmd_revoke_dmabuf(int fd, int dmabuf_fd, bool revoked)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_DMABUF_REVOKE,
		.dmabuf_revoke = { .dmabuf_fd = dmabuf_fd, .revoked = revoked },
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret < 0)
		return -1;
	return 0;
}
/* Assert-success wrapper around _test_cmd_revoke_dmabuf() */
#define test_cmd_revoke_dmabuf(dmabuf_fd, revoke) \
	ASSERT_EQ(0, _test_cmd_revoke_dmabuf(self->fd, dmabuf_fd, revoke))
595
596
static int _test_ioctl_destroy(int fd, unsigned int id)
597
{
598
struct iommu_destroy cmd = {
599
.size = sizeof(cmd),
600
.id = id,
601
};
602
return ioctl(fd, IOMMU_DESTROY, &cmd);
603
}
604
#define test_ioctl_destroy(id) ASSERT_EQ(0, _test_ioctl_destroy(self->fd, id))
605
606
/*
 * Allocate a fresh IOAS; *@id receives the new object ID.
 * Returns 0 or the raw ioctl() result.
 */
static int _test_ioctl_ioas_alloc(int fd, __u32 *id)
{
	struct iommu_ioas_alloc cmd = {
		.size = sizeof(cmd),
	};
	int ret;

	ret = ioctl(fd, IOMMU_IOAS_ALLOC, &cmd);
	if (ret)
		return ret;
	*id = cmd.out_ioas_id;
	return 0;
}
/* Allocate an IOAS and also assert the kernel never returns ID 0 */
#define test_ioctl_ioas_alloc(id)                                    \
	({                                                           \
		ASSERT_EQ(0, _test_ioctl_ioas_alloc(self->fd, id));  \
		ASSERT_NE(0, *(id));                                 \
	})
624
625
/*
 * Map @length bytes at @buffer into IOAS @ioas_id. With
 * IOMMU_IOAS_MAP_FIXED_IOVA the requested IOVA is read from *@iova;
 * in all cases *@iova is written back with the IOVA actually used.
 * Returns the raw ioctl() result.
 */
static int _test_ioctl_ioas_map(int fd, unsigned int ioas_id, void *buffer,
				size_t length, __u64 *iova, unsigned int flags)
{
	struct iommu_ioas_map cmd = {
		.size = sizeof(cmd),
		.flags = flags,
		.ioas_id = ioas_id,
		.user_va = (uintptr_t)buffer,
		.length = length,
	};
	int ret;

	if (flags & IOMMU_IOAS_MAP_FIXED_IOVA)
		cmd.iova = *iova;

	ret = ioctl(fd, IOMMU_IOAS_MAP, &cmd);
	*iova = cmd.iova;
	return ret;
}
644
/* Map RW into the fixture's IOAS, letting the kernel pick the IOVA */
#define test_ioctl_ioas_map(buffer, length, iova_p)                        \
	ASSERT_EQ(0, _test_ioctl_ioas_map(self->fd, self->ioas_id, buffer, \
					  length, iova_p,                  \
					  IOMMU_IOAS_MAP_WRITEABLE |       \
						  IOMMU_IOAS_MAP_READABLE))

/* Expect the RW map into the fixture's IOAS to fail with errno @_errno */
#define test_err_ioctl_ioas_map(_errno, buffer, length, iova_p)            \
	EXPECT_ERRNO(_errno,                                               \
		     _test_ioctl_ioas_map(self->fd, self->ioas_id, buffer, \
					  length, iova_p,                  \
					  IOMMU_IOAS_MAP_WRITEABLE |       \
						  IOMMU_IOAS_MAP_READABLE))

/* As test_ioctl_ioas_map() but against an explicit @ioas_id */
#define test_ioctl_ioas_map_id(ioas_id, buffer, length, iova_p)               \
	ASSERT_EQ(0, _test_ioctl_ioas_map(self->fd, ioas_id, buffer, length,  \
					  iova_p,                             \
					  IOMMU_IOAS_MAP_WRITEABLE |          \
						  IOMMU_IOAS_MAP_READABLE))

/* Map RW at a caller-chosen fixed @iova into the fixture's IOAS */
#define test_ioctl_ioas_map_fixed(buffer, length, iova)                  \
	({                                                               \
		__u64 __iova = iova;                                     \
		ASSERT_EQ(0, _test_ioctl_ioas_map(                       \
				     self->fd, self->ioas_id, buffer, length, \
				     &__iova,                            \
				     IOMMU_IOAS_MAP_FIXED_IOVA |         \
					     IOMMU_IOAS_MAP_WRITEABLE |  \
					     IOMMU_IOAS_MAP_READABLE));  \
	})

/* As test_ioctl_ioas_map_fixed() but against an explicit @ioas_id */
#define test_ioctl_ioas_map_fixed_id(ioas_id, buffer, length, iova)           \
	({                                                                    \
		__u64 __iova = iova;                                          \
		ASSERT_EQ(0,                                                  \
			  _test_ioctl_ioas_map(                               \
				  self->fd, ioas_id, buffer, length, &__iova, \
				  IOMMU_IOAS_MAP_FIXED_IOVA |                 \
					  IOMMU_IOAS_MAP_WRITEABLE |          \
					  IOMMU_IOAS_MAP_READABLE));          \
	})

/* Expect a fixed-IOVA RW map to fail with errno @_errno */
#define test_err_ioctl_ioas_map_fixed(_errno, buffer, length, iova)       \
	({                                                                \
		__u64 __iova = iova;                                      \
		EXPECT_ERRNO(_errno,                                      \
			     _test_ioctl_ioas_map(                        \
				     self->fd, self->ioas_id, buffer, length, \
				     &__iova,                             \
				     IOMMU_IOAS_MAP_FIXED_IOVA |          \
					     IOMMU_IOAS_MAP_WRITEABLE |   \
					     IOMMU_IOAS_MAP_READABLE));   \
	})
696
697
/*
 * Unmap [@iova, @iova + @length) from IOAS @ioas_id. If @out_len is
 * non-NULL it receives the number of bytes actually unmapped. Returns
 * the raw ioctl() result.
 */
static int _test_ioctl_ioas_unmap(int fd, unsigned int ioas_id, uint64_t iova,
				  size_t length, uint64_t *out_len)
{
	struct iommu_ioas_unmap cmd = {
		.size = sizeof(cmd),
		.ioas_id = ioas_id,
		.iova = iova,
		.length = length,
	};
	int ret;

	ret = ioctl(fd, IOMMU_IOAS_UNMAP, &cmd);
	if (out_len)
		*out_len = cmd.length;
	return ret;
}
/* Unmap from the fixture's IOAS and assert success */
#define test_ioctl_ioas_unmap(iova, length)                                \
	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, self->ioas_id, iova, \
					    length, NULL))

/* As above but against an explicit @ioas_id */
#define test_ioctl_ioas_unmap_id(ioas_id, iova, length)                      \
	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, ioas_id, iova, length, \
					    NULL))

/* Expect the unmap to fail with errno @_errno */
#define test_err_ioctl_ioas_unmap(_errno, iova, length)                      \
	EXPECT_ERRNO(_errno, _test_ioctl_ioas_unmap(self->fd, self->ioas_id, \
						    iova, length, NULL))
724
725
/*
 * Map @length bytes of file @mfd (from offset @start) into IOAS @ioas_id.
 * IOVA in/out semantics are the same as _test_ioctl_ioas_map(). Returns
 * the raw ioctl() result.
 */
static int _test_ioctl_ioas_map_file(int fd, unsigned int ioas_id, int mfd,
				     size_t start, size_t length, __u64 *iova,
				     unsigned int flags)
{
	struct iommu_ioas_map_file cmd = {
		.size = sizeof(cmd),
		.flags = flags,
		.ioas_id = ioas_id,
		.fd = mfd,
		.start = start,
		.length = length,
	};
	int ret;

	if (flags & IOMMU_IOAS_MAP_FIXED_IOVA)
		cmd.iova = *iova;

	ret = ioctl(fd, IOMMU_IOAS_MAP_FILE, &cmd);
	*iova = cmd.iova;
	return ret;
}
746
747
/* Map a file RW into the fixture's IOAS, kernel-chosen IOVA */
#define test_ioctl_ioas_map_file(mfd, start, length, iova_p)                  \
	ASSERT_EQ(0,                                                          \
		  _test_ioctl_ioas_map_file(                                  \
			  self->fd, self->ioas_id, mfd, start, length, iova_p,\
			  IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE))

/* Expect the file map to fail with errno @_errno */
#define test_err_ioctl_ioas_map_file(_errno, mfd, start, length, iova_p)      \
	EXPECT_ERRNO(                                                         \
		_errno,                                                       \
		_test_ioctl_ioas_map_file(                                    \
			self->fd, self->ioas_id, mfd, start, length, iova_p,  \
			IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE))

/* As test_ioctl_ioas_map_file() but against an explicit @ioas_id */
#define test_ioctl_ioas_map_id_file(ioas_id, mfd, start, length, iova_p)      \
	ASSERT_EQ(0,                                                          \
		  _test_ioctl_ioas_map_file(                                  \
			  self->fd, ioas_id, mfd, start, length, iova_p,      \
			  IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE))

/* Map a file RW at a caller-chosen fixed @iova */
#define test_ioctl_ioas_map_fixed_file(mfd, start, length, iova)        \
	({                                                              \
		__u64 __iova = iova;                                    \
		ASSERT_EQ(0, _test_ioctl_ioas_map_file(                 \
				     self->fd, self->ioas_id, mfd, start, \
				     length, &__iova,                   \
				     IOMMU_IOAS_MAP_FIXED_IOVA |        \
					     IOMMU_IOAS_MAP_WRITEABLE | \
					     IOMMU_IOAS_MAP_READABLE)); \
	})
776
777
/*
 * Set the selftest driver's temporary memory allocation limit to @limit
 * bytes (used to provoke allocation-failure paths). Returns the raw
 * ioctl() result.
 */
static int _test_ioctl_set_temp_memory_limit(int fd, unsigned int limit)
{
	struct iommu_test_cmd memlimit_cmd = {
		.size = sizeof(memlimit_cmd),
		.op = IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT,
		.memory_limit = { .limit = limit },
	};

	return ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT),
		     &memlimit_cmd);
}

/* Assert-success wrapper around _test_ioctl_set_temp_memory_limit() */
#define test_ioctl_set_temp_memory_limit(limit) \
	ASSERT_EQ(0, _test_ioctl_set_temp_memory_limit(self->fd, limit))

/* Restore the driver's default (large) memory limit */
#define test_ioctl_set_default_memory_limit() \
	test_ioctl_set_temp_memory_limit(65536)
794
795
/*
 * Close @fd and then, via a fresh /dev/iommu handle, verify the global
 * test buffer's page refcounts returned to normal -- i.e. the closed
 * context did not leak pins.
 */
static void teardown_iommufd(int fd, struct __test_metadata *_metadata)
{
	struct iommu_test_cmd test_cmd = {
		.size = sizeof(test_cmd),
		.op = IOMMU_TEST_OP_MD_CHECK_REFS,
		.check_refs = { .length = BUFFER_SIZE,
				.uptr = (uintptr_t)buffer },
	};

	/* Fixture was never opened -- nothing to tear down */
	if (fd == -1)
		return;

	EXPECT_EQ(0, close(fd));

	fd = open("/dev/iommu", O_RDWR);
	EXPECT_NE(-1, fd);
	EXPECT_EQ(0, ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_REFS),
			   &test_cmd));
	EXPECT_EQ(0, close(fd));
}
815
816
/* Run @cmd expecting it to return -1 with errno == @expected_errno */
#define EXPECT_ERRNO(expected_errno, cmd)         \
	({                                        \
		ASSERT_EQ(-1, cmd);               \
		EXPECT_EQ(expected_errno, errno); \
	})
821
822
#endif
823
824
/* @data can be NULL */
825
static int _test_cmd_get_hw_info(int fd, __u32 device_id, __u32 data_type,
826
void *data, size_t data_len,
827
uint32_t *capabilities, uint8_t *max_pasid)
828
{
829
struct iommu_test_hw_info *info = (struct iommu_test_hw_info *)data;
830
struct iommu_hw_info cmd = {
831
.size = sizeof(cmd),
832
.dev_id = device_id,
833
.data_len = data_len,
834
.in_data_type = data_type,
835
.data_uptr = (uint64_t)data,
836
.out_capabilities = 0,
837
};
838
int ret;
839
840
if (data_type != IOMMU_HW_INFO_TYPE_DEFAULT)
841
cmd.flags |= IOMMU_HW_INFO_FLAG_INPUT_TYPE;
842
843
ret = ioctl(fd, IOMMU_GET_HW_INFO, &cmd);
844
if (ret)
845
return ret;
846
847
assert(cmd.out_data_type == IOMMU_HW_INFO_TYPE_SELFTEST);
848
849
/*
850
* The struct iommu_test_hw_info should be the one defined
851
* by the current kernel.
852
*/
853
assert(cmd.data_len == sizeof(struct iommu_test_hw_info));
854
855
/*
856
* Trailing bytes should be 0 if user buffer is larger than
857
* the data that kernel reports.
858
*/
859
if (data_len > cmd.data_len) {
860
char *ptr = (char *)(data + cmd.data_len);
861
int idx = 0;
862
863
while (idx < data_len - cmd.data_len) {
864
assert(!*(ptr + idx));
865
idx++;
866
}
867
}
868
869
if (info) {
870
if (data_len >= offsetofend(struct iommu_test_hw_info, test_reg))
871
assert(info->test_reg == IOMMU_HW_INFO_SELFTEST_REGVAL);
872
if (data_len >= offsetofend(struct iommu_test_hw_info, flags))
873
assert(!info->flags);
874
}
875
876
if (max_pasid)
877
*max_pasid = cmd.out_max_pasid_log2;
878
879
if (capabilities)
880
*capabilities = cmd.out_capabilities;
881
882
return 0;
883
}
884
885
/* Fetch hw info into @data and assert success */
#define test_cmd_get_hw_info(device_id, data_type, data, data_len)         \
	ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, data_type, \
					   data, data_len, NULL, NULL))

/* Expect the hw info query to fail with errno @_errno */
#define test_err_get_hw_info(_errno, device_id, data_type, data, data_len) \
	EXPECT_ERRNO(_errno,                                               \
		     _test_cmd_get_hw_info(self->fd, device_id, data_type, \
					   data, data_len, NULL, NULL))

/* Fetch only the capability bits into @caps */
#define test_cmd_get_hw_capabilities(device_id, caps)                     \
	ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id,           \
					   IOMMU_HW_INFO_TYPE_DEFAULT, NULL, \
					   0, &caps, NULL))

/* Fetch only the max PASID (log2) into *@max_pasid */
#define test_cmd_get_hw_info_pasid(device_id, max_pasid)                  \
	ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id,           \
					   IOMMU_HW_INFO_TYPE_DEFAULT, NULL, \
					   0, NULL, max_pasid))
903
904
/*
 * Allocate a fault queue; *@fault_id receives the object ID and
 * *@fault_fd the readable/writable event fd. Returns 0 or the raw
 * ioctl() result.
 */
static int _test_ioctl_fault_alloc(int fd, __u32 *fault_id, __u32 *fault_fd)
{
	struct iommu_fault_alloc cmd = {
		.size = sizeof(cmd),
	};
	int ret;

	ret = ioctl(fd, IOMMU_FAULT_QUEUE_ALLOC, &cmd);
	if (ret)
		return ret;
	*fault_id = cmd.out_fault_id;
	*fault_fd = cmd.out_fault_fd;
	return 0;
}

/* Allocate a fault queue and assert neither returned handle is 0 */
#define test_ioctl_fault_alloc(fault_id, fault_fd)                       \
	({                                                               \
		ASSERT_EQ(0, _test_ioctl_fault_alloc(self->fd, fault_id, \
						     fault_fd));         \
		ASSERT_NE(0, *(fault_id));                               \
		ASSERT_NE(0, *(fault_fd));                               \
	})
926
927
/*
 * Trigger an I/O page fault on @device_id/@pasid, read the resulting
 * fault record from @fault_fd, and complete it with a SUCCESS response.
 * Returns 0 on success, the ioctl() result if triggering failed, or
 * -EIO if the fault fd could not be read/written.
 */
static int _test_cmd_trigger_iopf(int fd, __u32 device_id, __u32 pasid,
				  __u32 fault_fd)
{
	struct iommu_test_cmd trigger_iopf_cmd = {
		.size = sizeof(trigger_iopf_cmd),
		.op = IOMMU_TEST_OP_TRIGGER_IOPF,
		.trigger_iopf = {
			.dev_id = device_id,
			.pasid = pasid,
			.grpid = 0x2,
			.perm = IOMMU_PGFAULT_PERM_READ | IOMMU_PGFAULT_PERM_WRITE,
			.addr = 0xdeadbeaf,
		},
	};
	struct iommu_hwpt_page_response response = {
		.code = IOMMUFD_PAGE_RESP_SUCCESS,
	};
	struct iommu_hwpt_pgfault fault = {};
	ssize_t bytes;
	int ret;

	ret = ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_TRIGGER_IOPF), &trigger_iopf_cmd);
	if (ret)
		return ret;

	bytes = read(fault_fd, &fault, sizeof(fault));
	if (bytes <= 0)
		return -EIO;

	/* The response must echo the fault's cookie so the kernel can match it */
	response.cookie = fault.cookie;

	bytes = write(fault_fd, &response, sizeof(response));
	if (bytes <= 0)
		return -EIO;

	return 0;
}

/* Trigger + complete an IOPF on the default PASID 0x1 */
#define test_cmd_trigger_iopf(device_id, fault_fd) \
	ASSERT_EQ(0, _test_cmd_trigger_iopf(self->fd, device_id, 0x1, fault_fd))
/* Trigger + complete an IOPF on an explicit @pasid */
#define test_cmd_trigger_iopf_pasid(device_id, pasid, fault_fd)       \
	ASSERT_EQ(0, _test_cmd_trigger_iopf(self->fd, device_id,      \
					    pasid, fault_fd))
970
971
/*
 * Allocate a vIOMMU of @type for @device_id on nesting parent @hwpt_id,
 * with optional driver data @data/@data_len. If @viommu_id is non-NULL it
 * receives the new object ID. Returns 0 or the raw ioctl() result.
 */
static int _test_cmd_viommu_alloc(int fd, __u32 device_id, __u32 hwpt_id,
				  __u32 flags, __u32 type, void *data,
				  __u32 data_len, __u32 *viommu_id)
{
	struct iommu_viommu_alloc cmd = {
		.size = sizeof(cmd),
		.flags = flags,
		.type = type,
		.dev_id = device_id,
		.hwpt_id = hwpt_id,
		.data_uptr = (uint64_t)data,
		.data_len = data_len,
	};
	int ret;

	ret = ioctl(fd, IOMMU_VIOMMU_ALLOC, &cmd);
	if (ret)
		return ret;
	if (viommu_id)
		*viommu_id = cmd.out_viommu_id;
	return 0;
}

/* Assert-success wrapper (flags forced to 0) */
#define test_cmd_viommu_alloc(device_id, hwpt_id, type, data, data_len,     \
			      viommu_id)                                    \
	ASSERT_EQ(0, _test_cmd_viommu_alloc(self->fd, device_id, hwpt_id, 0,\
					    type, data, data_len, viommu_id))
/* Expect vIOMMU allocation to fail with errno @_errno */
#define test_err_viommu_alloc(_errno, device_id, hwpt_id, type, data,       \
			      data_len, viommu_id)                          \
	EXPECT_ERRNO(_errno,                                                \
		     _test_cmd_viommu_alloc(self->fd, device_id, hwpt_id, 0,\
					    type, data, data_len, viommu_id))
1003
1004
/*
 * Create a virtual device binding @idev_id to vIOMMU @viommu_id under the
 * guest-visible ID @virt_id. If @vdev_id is non-NULL it receives the new
 * object ID. Returns 0 or the raw ioctl() result.
 */
static int _test_cmd_vdevice_alloc(int fd, __u32 viommu_id, __u32 idev_id,
				   __u64 virt_id, __u32 *vdev_id)
{
	struct iommu_vdevice_alloc cmd = {
		.size = sizeof(cmd),
		.dev_id = idev_id,
		.viommu_id = viommu_id,
		.virt_id = virt_id,
	};
	int ret;

	ret = ioctl(fd, IOMMU_VDEVICE_ALLOC, &cmd);
	if (ret)
		return ret;
	if (vdev_id)
		*vdev_id = cmd.out_vdevice_id;
	return 0;
}

/* Assert-success wrapper around _test_cmd_vdevice_alloc() */
#define test_cmd_vdevice_alloc(viommu_id, idev_id, virt_id, vdev_id)       \
	ASSERT_EQ(0, _test_cmd_vdevice_alloc(self->fd, viommu_id, idev_id, \
					     virt_id, vdev_id))
/* Expect vdevice allocation to fail with errno @_errno */
#define test_err_vdevice_alloc(_errno, viommu_id, idev_id, virt_id, vdev_id) \
	EXPECT_ERRNO(_errno,                                                 \
		     _test_cmd_vdevice_alloc(self->fd, viommu_id, idev_id,   \
					     virt_id, vdev_id))
1030
1031
/*
 * Allocate HW queue @idx of @type on vIOMMU @viommu_id, backed by
 * @length bytes at nesting-parent IOVA @base_addr. If @hw_queue_id is
 * non-NULL it receives the new object ID. Returns 0 or the raw ioctl()
 * result.
 */
static int _test_cmd_hw_queue_alloc(int fd, __u32 viommu_id, __u32 type,
				    __u32 idx, __u64 base_addr, __u64 length,
				    __u32 *hw_queue_id)
{
	struct iommu_hw_queue_alloc cmd = {
		.size = sizeof(cmd),
		.viommu_id = viommu_id,
		.type = type,
		.index = idx,
		.nesting_parent_iova = base_addr,
		.length = length,
	};
	int ret;

	ret = ioctl(fd, IOMMU_HW_QUEUE_ALLOC, &cmd);
	if (ret)
		return ret;
	if (hw_queue_id)
		*hw_queue_id = cmd.out_hw_queue_id;
	return 0;
}

/* Assert-success wrapper around _test_cmd_hw_queue_alloc() */
#define test_cmd_hw_queue_alloc(viommu_id, type, idx, base_addr, len, out_qid) \
	ASSERT_EQ(0, _test_cmd_hw_queue_alloc(self->fd, viommu_id, type, idx,  \
					      base_addr, len, out_qid))
/* Expect HW queue allocation to fail with errno @_errno */
#define test_err_hw_queue_alloc(_errno, viommu_id, type, idx, base_addr, len, \
				out_qid)                                      \
	EXPECT_ERRNO(_errno,                                                  \
		     _test_cmd_hw_queue_alloc(self->fd, viommu_id, type, idx, \
					      base_addr, len, out_qid))
1061
1062
/*
 * Allocate a virtual event queue of @type on vIOMMU @viommu_id.
 *
 * The queue depth is hard-wired to 2 entries for every caller, keeping the
 * queue small enough for tests to overflow it deliberately.
 *
 * On success, stores the new object ID and its read()-able file descriptor
 * through @veventq_id / @veventq_fd (each only if non-NULL) and returns 0.
 * On failure, returns the raw ioctl() result with errno set.
 */
static int _test_cmd_veventq_alloc(int fd, __u32 viommu_id, __u32 type,
				   __u32 *veventq_id, __u32 *veventq_fd)
{
	struct iommu_veventq_alloc cmd = {
		.size = sizeof(cmd),
		.viommu_id = viommu_id,
		.type = type,
		.veventq_depth = 2,
	};
	int rc = ioctl(fd, IOMMU_VEVENTQ_ALLOC, &cmd);

	if (rc)
		return rc;
	if (veventq_id)
		*veventq_id = cmd.out_veventq_id;
	if (veventq_fd)
		*veventq_fd = cmd.out_veventq_fd;
	return 0;
}
1082
1083
/* Allocate a vEVENTQ on the harness fd and assert the ioctl succeeds. */
#define test_cmd_veventq_alloc(viommu_id, type, veventq_id, veventq_fd)       \
	ASSERT_EQ(0, _test_cmd_veventq_alloc(self->fd, viommu_id, type,       \
					     veventq_id, veventq_fd))
/* Attempt the same allocation but expect it to fail with errno == _errno. */
#define test_err_veventq_alloc(_errno, viommu_id, type, veventq_id,           \
			       veventq_fd)                                     \
	EXPECT_ERRNO(_errno,                                                   \
		     _test_cmd_veventq_alloc(self->fd, viommu_id, type,        \
					     veventq_id, veventq_fd))
1091
1092
/*
 * Fire @nvevents test vEVENTs for device @dev_id, one ioctl per event.
 *
 * Returns 0 when every trigger succeeded, -1 (errno from ioctl) as soon as
 * one fails.
 */
static int _test_cmd_trigger_vevents(int fd, __u32 dev_id, __u32 nvevents)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_TRIGGER_VEVENT,
		.trigger_vevent = {
			.dev_id = dev_id,
		},
	};
	__u32 i;

	/* The same command block is reused for every trigger. */
	for (i = 0; i != nvevents; i++) {
		if (ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_TRIGGER_VEVENT),
			  &cmd))
			return -1;
	}
	return 0;
}
1109
1110
/* Trigger @nvevents vEVENTs for @dev_id on the harness fd; assert success. */
#define test_cmd_trigger_vevents(dev_id, nvevents) \
	ASSERT_EQ(0, _test_cmd_trigger_vevents(self->fd, dev_id, nvevents))
1112
1113
/*
 * Drain and validate @nvevents virtual events from @event_fd.
 *
 * Waits up to 1s for the queue fd to become readable, reads @nvevents
 * (header + selftest payload) records in a single read(), then checks each
 * record for lost-event flags, a contiguous sequence number relative to
 * *@prev_seq, and the expected @virt_id.
 *
 * @fd is unused here (the macros below pass self->fd for call-shape
 * consistency with the other helpers).
 *
 * Returns 0 on success; -1 with errno set on failure (ENOMEM on allocation
 * failure, EFAULT on a failed/empty read, EOVERFLOW on lost or skipped
 * sequence numbers, EINVAL on a virt_id mismatch).  *@prev_seq is updated
 * to the last sequence number seen.
 */
static int _test_cmd_read_vevents(int fd, __u32 event_fd, __u32 nvevents,
				  __u32 virt_id, int *prev_seq)
{
	struct pollfd pollfd = { .fd = event_fd, .events = POLLIN };
	struct iommu_viommu_event_selftest *event;
	struct iommufd_vevent_header *hdr;
	size_t record_size;
	ssize_t bytes;
	void *data;
	int ret;
	__u32 i;	/* unsigned to match nvevents; avoids signed/unsigned compare */

	ret = poll(&pollfd, 1, 1000);
	if (ret < 0)
		return -1;

	/* Each record on the wire is a fixed header followed by the payload */
	record_size = sizeof(*hdr) + sizeof(*event);
	data = calloc(nvevents, record_size);
	if (!data) {
		errno = ENOMEM;
		return -1;
	}

	bytes = read(event_fd, data, nvevents * record_size);
	if (bytes <= 0) {
		errno = EFAULT;
		ret = -1;
		goto out_free;
	}

	for (i = 0; i < nvevents; i++) {
		hdr = data + i * record_size;

		if (hdr->flags & IOMMU_VEVENTQ_FLAG_LOST_EVENTS ||
		    hdr->sequence - *prev_seq > 1) {
			*prev_seq = hdr->sequence;
			errno = EOVERFLOW;
			ret = -1;
			goto out_free;
		}
		*prev_seq = hdr->sequence;

		/*
		 * Fix: the payload follows *this* record's header.  The old
		 * code computed "data + sizeof(*hdr)", which always pointed
		 * at the first record's payload, so the virt_id of events
		 * 1..nvevents-1 was never actually checked.
		 */
		event = (void *)hdr + sizeof(*hdr);
		if (event->virt_id != virt_id) {
			errno = EINVAL;
			ret = -1;
			goto out_free;
		}
	}

	ret = 0;
out_free:
	free(data);
	return ret;
}
1165
1166
/* Read @nvevents from @event_fd and assert that all of them validate. */
#define test_cmd_read_vevents(event_fd, nvevents, virt_id, prev_seq)          \
	ASSERT_EQ(0, _test_cmd_read_vevents(self->fd, event_fd, nvevents,     \
					    virt_id, prev_seq))
/* Same read, but expect it to fail with errno == _errno. */
#define test_err_read_vevents(_errno, event_fd, nvevents, virt_id, prev_seq)  \
	EXPECT_ERRNO(_errno,                                                   \
		     _test_cmd_read_vevents(self->fd, event_fd, nvevents,      \
					    virt_id, prev_seq))
1173
1174
/*
 * Attach @pasid of mock device @stdev_id to page table @pt_id.
 *
 * Thin wrapper around the IOMMU_TEST_OP_PASID_ATTACH test ioctl; returns
 * the raw ioctl() result (0 on success, -1 with errno set on failure).
 */
static int _test_cmd_pasid_attach(int fd, __u32 stdev_id, __u32 pasid,
				  __u32 pt_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_PASID_ATTACH,
		.id = stdev_id,
	};

	cmd.pasid_attach.pasid = pasid;
	cmd.pasid_attach.pt_id = pt_id;

	return ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_PASID_ATTACH), &cmd);
}
1190
1191
/* Attach @pasid of the harness's stdev to @hwpt_id; assert success. */
#define test_cmd_pasid_attach(pasid, hwpt_id)                         \
	ASSERT_EQ(0, _test_cmd_pasid_attach(self->fd, self->stdev_id, \
					    pasid, hwpt_id))

/* Attempt the same attach but expect it to fail with errno == _errno. */
#define test_err_pasid_attach(_errno, pasid, hwpt_id)                 \
	EXPECT_ERRNO(_errno,                                          \
		     _test_cmd_pasid_attach(self->fd, self->stdev_id, \
					    pasid, hwpt_id))
1199
1200
/*
 * Replace the page table that @pasid of mock device @stdev_id is attached
 * to with @pt_id.
 *
 * Thin wrapper around the IOMMU_TEST_OP_PASID_REPLACE test ioctl; returns
 * the raw ioctl() result (0 on success, -1 with errno set on failure).
 */
static int _test_cmd_pasid_replace(int fd, __u32 stdev_id, __u32 pasid,
				   __u32 pt_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_PASID_REPLACE,
		.id = stdev_id,
	};

	cmd.pasid_replace.pasid = pasid;
	cmd.pasid_replace.pt_id = pt_id;

	return ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_PASID_REPLACE), &cmd);
}
1216
1217
/* Replace @pasid's page table on the harness's stdev with @hwpt_id; assert success. */
#define test_cmd_pasid_replace(pasid, hwpt_id)                         \
	ASSERT_EQ(0, _test_cmd_pasid_replace(self->fd, self->stdev_id, \
					     pasid, hwpt_id))

/* Attempt the same replace but expect it to fail with errno == _errno. */
#define test_err_pasid_replace(_errno, pasid, hwpt_id)                 \
	EXPECT_ERRNO(_errno,                                           \
		     _test_cmd_pasid_replace(self->fd, self->stdev_id, \
					     pasid, hwpt_id))
1225
1226
/*
 * Detach @pasid of mock device @stdev_id from whatever it is attached to.
 *
 * Thin wrapper around the IOMMU_TEST_OP_PASID_DETACH test ioctl; returns
 * the raw ioctl() result (0 on success, -1 with errno set on failure).
 */
static int _test_cmd_pasid_detach(int fd, __u32 stdev_id, __u32 pasid)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_PASID_DETACH,
		.id = stdev_id,
	};

	cmd.pasid_detach.pasid = pasid;

	return ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_PASID_DETACH), &cmd);
}
1240
1241
/* Detach @pasid from the harness's stdev; assert success. */
#define test_cmd_pasid_detach(pasid) \
	ASSERT_EQ(0, _test_cmd_pasid_detach(self->fd, self->stdev_id, pasid))
1243
1244
/*
 * Ask the mock driver whether @pasid of device @stdev_id is currently
 * attached to @hwpt_id.
 *
 * Returns the raw ioctl() result: 0 when the kernel-side state matches,
 * -1 with errno set otherwise.
 */
static int test_cmd_pasid_check_hwpt(int fd, __u32 stdev_id, __u32 pasid,
				     __u32 hwpt_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_PASID_CHECK_HWPT,
		.id = stdev_id,
	};

	cmd.pasid_check.pasid = pasid;
	cmd.pasid_check.hwpt_id = hwpt_id;

	return ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_PASID_CHECK_HWPT),
		     &cmd);
}
1260
1261