GitHub Repository: torvalds/linux
Path: blob/master/tools/virtio/vringh_test.c
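Note: assuming the usual tools/virtio userspace setup (this file is compiled there against the stub kernel headers), the test is typically built with the Makefile in tools/virtio and run as ./vringh_test, optionally with the flags parsed in main() below: --indirect, --eventidx, --virtio-1, --slow-range, --fast-vringh, --parallel.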
// SPDX-License-Identifier: GPL-2.0
/* Simple test of virtio code, entirely in userspace. */
#define _GNU_SOURCE
#include <sched.h>
#include <err.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/virtio.h>
#include <linux/vringh.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/uaccess.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <fcntl.h>

#define USER_MEM (1024*1024)
void *__user_addr_min, *__user_addr_max;
void *__kmalloc_fake, *__kfree_ignore_start, *__kfree_ignore_end;
static u64 user_addr_offset;

#define RINGSIZE 256
#define ALIGN 4096

static bool never_notify_host(struct virtqueue *vq)
{
	abort();
}

static void never_callback_guest(struct virtqueue *vq)
{
	abort();
}

static bool getrange_iov(struct vringh *vrh, u64 addr, struct vringh_range *r)
{
	if (addr < (u64)(unsigned long)__user_addr_min - user_addr_offset)
		return false;
	if (addr >= (u64)(unsigned long)__user_addr_max - user_addr_offset)
		return false;

	r->start = (u64)(unsigned long)__user_addr_min - user_addr_offset;
	r->end_incl = (u64)(unsigned long)__user_addr_max - 1 - user_addr_offset;
	r->offset = user_addr_offset;
	return true;
}

/* We return single byte ranges. */
static bool getrange_slow(struct vringh *vrh, u64 addr, struct vringh_range *r)
{
	if (addr < (u64)(unsigned long)__user_addr_min - user_addr_offset)
		return false;
	if (addr >= (u64)(unsigned long)__user_addr_max - user_addr_offset)
		return false;

	r->start = addr;
	r->end_incl = r->start;
	r->offset = user_addr_offset;
	return true;
}

struct guest_virtio_device {
	struct virtio_device vdev;
	int to_host_fd;
	unsigned long notifies;
};

static bool parallel_notify_host(struct virtqueue *vq)
{
	int rc;
	struct guest_virtio_device *gvdev;

	gvdev = container_of(vq->vdev, struct guest_virtio_device, vdev);
	rc = write(gvdev->to_host_fd, "", 1);
	if (rc < 0)
		return false;
	gvdev->notifies++;
	return true;
}

static bool no_notify_host(struct virtqueue *vq)
{
	return true;
}

#define NUM_XFERS (10000000)

/* We aim for two "distant" cpus. */
static void find_cpus(unsigned int *first, unsigned int *last)
{
	unsigned int i;

	*first = -1U;
	*last = 0;
	for (i = 0; i < 4096; i++) {
		cpu_set_t set;
		CPU_ZERO(&set);
		CPU_SET(i, &set);
		if (sched_setaffinity(getpid(), sizeof(set), &set) == 0) {
			if (i < *first)
				*first = i;
			if (i > *last)
				*last = i;
		}
	}
}

/* Opencoded version for fast mode */
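/*
 * Return convention, relied on by the fast path below: 1 means a head was
 * fetched, 0 means no new available entries, and anything else is the
 * error returned by get_user().
 */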
static inline int vringh_get_head(struct vringh *vrh, u16 *head)
{
	u16 avail_idx, i;
	int err;

	err = get_user(avail_idx, &vrh->vring.avail->idx);
	if (err)
		return err;

	if (vrh->last_avail_idx == avail_idx)
		return 0;

	/* Only get avail ring entries after they have been exposed by guest. */
	virtio_rmb(vrh->weak_barriers);

	i = vrh->last_avail_idx & (vrh->vring.num - 1);

	err = get_user(*head, &vrh->vring.avail->ring[i]);
	if (err)
		return err;

	vrh->last_avail_idx++;
	return 1;
}

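/*
 * Two-process stress test: after fork(), the parent acts as the host
 * (driving the ring via vringh) and the child acts as the guest (driving
 * it via a virtqueue). The two pipes carry notifications in each
 * direction, and the shared file is mapped separately in each process so
 * that the getrange() address translation is actually exercised.
 */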
static int parallel_test(u64 features,
			 bool (*getrange)(struct vringh *vrh,
					  u64 addr, struct vringh_range *r),
			 bool fast_vringh)
{
	void *host_map, *guest_map;
	int pipe_ret, fd, mapsize, to_guest[2], to_host[2];
	unsigned long xfers = 0, notifies = 0, receives = 0;
	unsigned int first_cpu, last_cpu;
	cpu_set_t cpu_set;
	char buf[128];

	/* Create real file to mmap. */
	fd = open("/tmp/vringh_test-file", O_RDWR|O_CREAT|O_TRUNC, 0600);
	if (fd < 0)
		err(1, "Opening /tmp/vringh_test-file");

	/* Extra room at the end for some data, and indirects */
	mapsize = vring_size(RINGSIZE, ALIGN)
		+ RINGSIZE * 2 * sizeof(int)
		+ RINGSIZE * 6 * sizeof(struct vring_desc);
	mapsize = (mapsize + getpagesize() - 1) & ~(getpagesize() - 1);
	ftruncate(fd, mapsize);

	/* Parent and child use separate addresses, to check our mapping logic! */
	host_map = mmap(NULL, mapsize, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
	guest_map = mmap(NULL, mapsize, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);

	pipe_ret = pipe(to_guest);
	assert(!pipe_ret);

	pipe_ret = pipe(to_host);
	assert(!pipe_ret);

	CPU_ZERO(&cpu_set);
	find_cpus(&first_cpu, &last_cpu);
	printf("Using CPUS %u and %u\n", first_cpu, last_cpu);
	fflush(stdout);

	if (fork() != 0) {
		struct vringh vrh;
		int status, err, rlen = 0;
		char rbuf[5];

		/* We are the host: never access guest addresses! */
		munmap(guest_map, mapsize);

		__user_addr_min = host_map;
		__user_addr_max = __user_addr_min + mapsize;
		user_addr_offset = host_map - guest_map;
		assert(user_addr_offset);

		close(to_guest[0]);
		close(to_host[1]);

		vring_init(&vrh.vring, RINGSIZE, host_map, ALIGN);
		vringh_init_user(&vrh, features, RINGSIZE, true,
				 vrh.vring.desc, vrh.vring.avail, vrh.vring.used);
		CPU_SET(first_cpu, &cpu_set);
		if (sched_setaffinity(getpid(), sizeof(cpu_set), &cpu_set))
			errx(1, "Could not set affinity to cpu %u", first_cpu);

		while (xfers < NUM_XFERS) {
			struct iovec host_riov[2], host_wiov[2];
			struct vringh_iov riov, wiov;
			u16 head, written;

			if (fast_vringh) {
				for (;;) {
					err = vringh_get_head(&vrh, &head);
					if (err != 0)
						break;
					err = vringh_need_notify_user(&vrh);
					if (err < 0)
						errx(1, "vringh_need_notify_user: %i",
						     err);
					if (err) {
						write(to_guest[1], "", 1);
						notifies++;
					}
				}
				if (err != 1)
					errx(1, "vringh_get_head");
				written = 0;
				goto complete;
			} else {
				vringh_iov_init(&riov,
						host_riov,
						ARRAY_SIZE(host_riov));
				vringh_iov_init(&wiov,
						host_wiov,
						ARRAY_SIZE(host_wiov));

				err = vringh_getdesc_user(&vrh, &riov, &wiov,
							  getrange, &head);
			}
			if (err == 0) {
				err = vringh_need_notify_user(&vrh);
				if (err < 0)
					errx(1, "vringh_need_notify_user: %i",
					     err);
				if (err) {
					write(to_guest[1], "", 1);
					notifies++;
				}

				if (!vringh_notify_enable_user(&vrh))
					continue;

				/* Swallow all notifies at once. */
				if (read(to_host[0], buf, sizeof(buf)) < 1)
					break;

				vringh_notify_disable_user(&vrh);
				receives++;
				continue;
			}
			if (err != 1)
				errx(1, "vringh_getdesc_user: %i", err);

			/* We simply copy bytes. */
			if (riov.used) {
				rlen = vringh_iov_pull_user(&riov, rbuf,
							    sizeof(rbuf));
				if (rlen != 4)
					errx(1, "vringh_iov_pull_user: %i",
					     rlen);
				assert(riov.i == riov.used);
				written = 0;
			} else {
				err = vringh_iov_push_user(&wiov, rbuf, rlen);
				if (err != rlen)
					errx(1, "vringh_iov_push_user: %i",
					     err);
				assert(wiov.i == wiov.used);
				written = err;
			}
		complete:
			xfers++;

			err = vringh_complete_user(&vrh, head, written);
			if (err != 0)
				errx(1, "vringh_complete_user: %i", err);
		}

		err = vringh_need_notify_user(&vrh);
		if (err < 0)
			errx(1, "vringh_need_notify_user: %i", err);
		if (err) {
			write(to_guest[1], "", 1);
			notifies++;
		}
		wait(&status);
		if (!WIFEXITED(status))
			errx(1, "Child died with signal %i?", WTERMSIG(status));
		if (WEXITSTATUS(status) != 0)
			errx(1, "Child exited %i?", WEXITSTATUS(status));
		printf("Host: notified %lu, pinged %lu\n", notifies, receives);
		return 0;
	} else {
		struct guest_virtio_device gvdev;
		struct virtqueue *vq;
		unsigned int *data;
		struct vring_desc *indirects;
		unsigned int finished = 0;

		/* We pass sg[]s pointing into here, but we need RINGSIZE+1 */
		data = guest_map + vring_size(RINGSIZE, ALIGN);
		indirects = (void *)data + (RINGSIZE + 1) * 2 * sizeof(int);

		/* We are the guest. */
		munmap(host_map, mapsize);

		close(to_guest[1]);
		close(to_host[0]);

		gvdev.vdev.features = features;
		INIT_LIST_HEAD(&gvdev.vdev.vqs);
		spin_lock_init(&gvdev.vdev.vqs_list_lock);
		gvdev.to_host_fd = to_host[1];
		gvdev.notifies = 0;

		CPU_SET(first_cpu, &cpu_set);
		if (sched_setaffinity(getpid(), sizeof(cpu_set), &cpu_set))
			err(1, "Could not set affinity to cpu %u", first_cpu);

		vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &gvdev.vdev, true,
					 false, guest_map,
					 fast_vringh ? no_notify_host
						     : parallel_notify_host,
					 never_callback_guest, "guest vq");

		/* Don't kfree indirects. */
		__kfree_ignore_start = indirects;
		__kfree_ignore_end = indirects + RINGSIZE * 6;

		while (xfers < NUM_XFERS) {
			struct scatterlist sg[4];
			unsigned int num_sg, len;
			int *dbuf, err;
			bool output = !(xfers % 2);

			/* Consume bufs. */
			while ((dbuf = virtqueue_get_buf(vq, &len)) != NULL) {
				if (len == 4)
					assert(*dbuf == finished - 1);
				else if (!fast_vringh)
					assert(*dbuf == finished);
				finished++;
			}

			/* Produce a buffer. */
			dbuf = data + (xfers % (RINGSIZE + 1));

			if (output)
				*dbuf = xfers;
			else
				*dbuf = -1;

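			/* Rotate through a few scatterlist layouts that all
			 * cover the same 4 bytes of *dbuf. */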
			switch ((xfers / sizeof(*dbuf)) % 4) {
			case 0:
				/* Nasty three-element sg list. */
				sg_init_table(sg, num_sg = 3);
				sg_set_buf(&sg[0], (void *)dbuf, 1);
				sg_set_buf(&sg[1], (void *)dbuf + 1, 2);
				sg_set_buf(&sg[2], (void *)dbuf + 3, 1);
				break;
			case 1:
				sg_init_table(sg, num_sg = 2);
				sg_set_buf(&sg[0], (void *)dbuf, 1);
				sg_set_buf(&sg[1], (void *)dbuf + 1, 3);
				break;
			case 2:
				sg_init_table(sg, num_sg = 1);
				sg_set_buf(&sg[0], (void *)dbuf, 4);
				break;
			case 3:
				sg_init_table(sg, num_sg = 4);
				sg_set_buf(&sg[0], (void *)dbuf, 1);
				sg_set_buf(&sg[1], (void *)dbuf + 1, 1);
				sg_set_buf(&sg[2], (void *)dbuf + 2, 1);
				sg_set_buf(&sg[3], (void *)dbuf + 3, 1);
				break;
			}

			/* May allocate an indirect, so force it to allocate
			 * user addr */
			__kmalloc_fake = indirects + (xfers % RINGSIZE) * 4;
			if (output)
				err = virtqueue_add_outbuf(vq, sg, num_sg, dbuf,
							   GFP_KERNEL);
			else
				err = virtqueue_add_inbuf(vq, sg, num_sg,
							  dbuf, GFP_KERNEL);

			if (err == -ENOSPC) {
				if (!virtqueue_enable_cb_delayed(vq))
					continue;
				/* Swallow all notifies at once. */
				if (read(to_guest[0], buf, sizeof(buf)) < 1)
					break;

				receives++;
				virtqueue_disable_cb(vq);
				continue;
			}

			if (err)
				errx(1, "virtqueue_add_in/outbuf: %i", err);

			xfers++;
			virtqueue_kick(vq);
		}

		/* Any extra? */
		while (finished != xfers) {
			int *dbuf;
			unsigned int len;

			/* Consume bufs. */
			dbuf = virtqueue_get_buf(vq, &len);
			if (dbuf) {
				if (len == 4)
					assert(*dbuf == finished - 1);
				else
					assert(len == 0);
				finished++;
				continue;
			}

			if (!virtqueue_enable_cb_delayed(vq))
				continue;
			if (read(to_guest[0], buf, sizeof(buf)) < 1)
				break;

			receives++;
			virtqueue_disable_cb(vq);
		}

		printf("Guest: notified %lu, pinged %lu\n",
		       gvdev.notifies, receives);
		vring_del_virtqueue(vq);
		return 0;
	}
}

int main(int argc, char *argv[])
{
	struct virtio_device vdev;
	struct virtqueue *vq;
	struct vringh vrh;
	struct scatterlist guest_sg[RINGSIZE], *sgs[2];
	struct iovec host_riov[2], host_wiov[2];
	struct vringh_iov riov, wiov;
	struct vring_used_elem used[RINGSIZE];
	char buf[28];
	u16 head;
	int err;
	unsigned i;
	void *ret;
	bool (*getrange)(struct vringh *vrh, u64 addr, struct vringh_range *r);
	bool fast_vringh = false, parallel = false;

	getrange = getrange_iov;
	vdev.features = 0;
	INIT_LIST_HEAD(&vdev.vqs);
	spin_lock_init(&vdev.vqs_list_lock);

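	/*
	 * Recognised flags: --indirect, --eventidx, --virtio-1 (feature
	 * bits), --slow-range (single-byte getrange), --fast-vringh,
	 * --parallel.
	 */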
	while (argv[1]) {
		if (strcmp(argv[1], "--indirect") == 0)
			__virtio_set_bit(&vdev, VIRTIO_RING_F_INDIRECT_DESC);
		else if (strcmp(argv[1], "--eventidx") == 0)
			__virtio_set_bit(&vdev, VIRTIO_RING_F_EVENT_IDX);
		else if (strcmp(argv[1], "--virtio-1") == 0)
			__virtio_set_bit(&vdev, VIRTIO_F_VERSION_1);
		else if (strcmp(argv[1], "--slow-range") == 0)
			getrange = getrange_slow;
		else if (strcmp(argv[1], "--fast-vringh") == 0)
			fast_vringh = true;
		else if (strcmp(argv[1], "--parallel") == 0)
			parallel = true;
		else
			errx(1, "Unknown arg %s", argv[1]);
		argv++;
	}

	if (parallel)
		return parallel_test(vdev.features, getrange, fast_vringh);

	if (posix_memalign(&__user_addr_min, PAGE_SIZE, USER_MEM) != 0)
		abort();
	__user_addr_max = __user_addr_min + USER_MEM;
	memset(__user_addr_min, 0, vring_size(RINGSIZE, ALIGN));

	/* Set up guest side. */
	vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &vdev, true, false,
				 __user_addr_min,
				 never_notify_host, never_callback_guest,
				 "guest vq");

	/* Set up host side. */
	vring_init(&vrh.vring, RINGSIZE, __user_addr_min, ALIGN);
	vringh_init_user(&vrh, vdev.features, RINGSIZE, true,
			 vrh.vring.desc, vrh.vring.avail, vrh.vring.used);

	/* No descriptor to get yet... */
	err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
	if (err != 0)
		errx(1, "vringh_getdesc_user: %i", err);

	/* Guest puts in a descriptor. */
	memcpy(__user_addr_max - 1, "a", 1);
	sg_init_table(guest_sg, 1);
	sg_set_buf(&guest_sg[0], __user_addr_max - 1, 1);
	sg_init_table(guest_sg+1, 1);
	sg_set_buf(&guest_sg[1], __user_addr_max - 3, 2);
	sgs[0] = &guest_sg[0];
	sgs[1] = &guest_sg[1];

	/* May allocate an indirect, so force it to allocate user addr */
	__kmalloc_fake = __user_addr_min + vring_size(RINGSIZE, ALIGN);
	err = virtqueue_add_sgs(vq, sgs, 1, 1, &err, GFP_KERNEL);
	if (err)
		errx(1, "virtqueue_add_sgs: %i", err);
	__kmalloc_fake = NULL;

	/* Host retrieves it. */
	vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
	vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

	err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
	if (err != 1)
		errx(1, "vringh_getdesc_user: %i", err);

	assert(riov.used == 1);
	assert(riov.iov[0].iov_base == __user_addr_max - 1);
	assert(riov.iov[0].iov_len == 1);
	if (getrange != getrange_slow) {
		assert(wiov.used == 1);
		assert(wiov.iov[0].iov_base == __user_addr_max - 3);
		assert(wiov.iov[0].iov_len == 2);
	} else {
		assert(wiov.used == 2);
		assert(wiov.iov[0].iov_base == __user_addr_max - 3);
		assert(wiov.iov[0].iov_len == 1);
		assert(wiov.iov[1].iov_base == __user_addr_max - 2);
		assert(wiov.iov[1].iov_len == 1);
	}

	err = vringh_iov_pull_user(&riov, buf, 5);
	if (err != 1)
		errx(1, "vringh_iov_pull_user: %i", err);
	assert(buf[0] == 'a');
	assert(riov.i == 1);
	assert(vringh_iov_pull_user(&riov, buf, 5) == 0);

	memcpy(buf, "bcdef", 5);
	err = vringh_iov_push_user(&wiov, buf, 5);
	if (err != 2)
		errx(1, "vringh_iov_push_user: %i", err);
	assert(memcmp(__user_addr_max - 3, "bc", 2) == 0);
	assert(wiov.i == wiov.used);
	assert(vringh_iov_push_user(&wiov, buf, 5) == 0);

	/* Host is done. */
	err = vringh_complete_user(&vrh, head, err);
	if (err != 0)
		errx(1, "vringh_complete_user: %i", err);

	/* Guest should see used token now. */
	__kfree_ignore_start = __user_addr_min + vring_size(RINGSIZE, ALIGN);
	__kfree_ignore_end = __kfree_ignore_start + 1;
	ret = virtqueue_get_buf(vq, &i);
	if (ret != &err)
		errx(1, "virtqueue_get_buf: %p", ret);
	assert(i == 2);

	/* Guest puts in a huge descriptor. */
	sg_init_table(guest_sg, RINGSIZE);
	for (i = 0; i < RINGSIZE; i++) {
		sg_set_buf(&guest_sg[i],
			   __user_addr_max - USER_MEM/4, USER_MEM/4);
	}

	/* Fill contents with recognisable garbage. */
	for (i = 0; i < USER_MEM/4; i++)
		((char *)__user_addr_max - USER_MEM/4)[i] = i;

	/* This will allocate an indirect, so force it to allocate user addr */
	__kmalloc_fake = __user_addr_min + vring_size(RINGSIZE, ALIGN);
	err = virtqueue_add_outbuf(vq, guest_sg, RINGSIZE, &err, GFP_KERNEL);
	if (err)
		errx(1, "virtqueue_add_outbuf (large): %i", err);
	__kmalloc_fake = NULL;

	/* Host picks it up (allocates new iov). */
	vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
	vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

	err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
	if (err != 1)
		errx(1, "vringh_getdesc_user: %i", err);

	assert(riov.max_num & VRINGH_IOV_ALLOCATED);
	assert(riov.iov != host_riov);
	if (getrange != getrange_slow)
		assert(riov.used == RINGSIZE);
	else
		assert(riov.used == RINGSIZE * USER_MEM/4);

	assert(!(wiov.max_num & VRINGH_IOV_ALLOCATED));
	assert(wiov.used == 0);

	/* Pull data back out (in odd chunks), should be as expected. */
	for (i = 0; i < RINGSIZE * USER_MEM/4; i += 3) {
		err = vringh_iov_pull_user(&riov, buf, 3);
		if (err != 3 && i + err != RINGSIZE * USER_MEM/4)
			errx(1, "vringh_iov_pull_user large: %i", err);
		assert(buf[0] == (char)i);
		assert(err < 2 || buf[1] == (char)(i + 1));
		assert(err < 3 || buf[2] == (char)(i + 2));
	}
	assert(riov.i == riov.used);
	vringh_iov_cleanup(&riov);
	vringh_iov_cleanup(&wiov);

	/* Complete using multi interface, just because we can. */
	used[0].id = head;
	used[0].len = 0;
	err = vringh_complete_multi_user(&vrh, used, 1);
	if (err)
		errx(1, "vringh_complete_multi_user(1): %i", err);

	/* Free up those descriptors. */
	ret = virtqueue_get_buf(vq, &i);
	if (ret != &err)
		errx(1, "virtqueue_get_buf: %p", ret);

	/* Add lots of descriptors. */
	sg_init_table(guest_sg, 1);
	sg_set_buf(&guest_sg[0], __user_addr_max - 1, 1);
	for (i = 0; i < RINGSIZE; i++) {
		err = virtqueue_add_outbuf(vq, guest_sg, 1, &err, GFP_KERNEL);
		if (err)
			errx(1, "virtqueue_add_outbuf (multiple): %i", err);
	}

	/* Now get many, and consume them all at once. */
	vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
	vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

	for (i = 0; i < RINGSIZE; i++) {
		err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
		if (err != 1)
			errx(1, "vringh_getdesc_user: %i", err);
		used[i].id = head;
		used[i].len = 0;
	}
	/* Make sure it wraps around ring, to test! */
	assert(vrh.vring.used->idx % RINGSIZE != 0);
	err = vringh_complete_multi_user(&vrh, used, RINGSIZE);
	if (err)
		errx(1, "vringh_complete_multi_user: %i", err);

	/* Free those buffers. */
	for (i = 0; i < RINGSIZE; i++) {
		unsigned len;
		assert(virtqueue_get_buf(vq, &len) != NULL);
	}

	/* Test weird (but legal!) indirect. */
	if (__virtio_test_bit(&vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
		char *data = __user_addr_max - USER_MEM/4;
		struct vring_desc *d = __user_addr_max - USER_MEM/2;
		struct vring vring;

		/* Force creation of direct, which we modify. */
		__virtio_clear_bit(&vdev, VIRTIO_RING_F_INDIRECT_DESC);
		vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &vdev, true,
					 false, __user_addr_min,
					 never_notify_host,
					 never_callback_guest,
					 "guest vq");

		sg_init_table(guest_sg, 4);
		sg_set_buf(&guest_sg[0], d, sizeof(*d)*2);
		sg_set_buf(&guest_sg[1], d + 2, sizeof(*d)*1);
		sg_set_buf(&guest_sg[2], data + 6, 4);
		sg_set_buf(&guest_sg[3], d + 3, sizeof(*d)*3);

		err = virtqueue_add_outbuf(vq, guest_sg, 4, &err, GFP_KERNEL);
		if (err)
			errx(1, "virtqueue_add_outbuf (indirect): %i", err);

		vring_init(&vring, RINGSIZE, __user_addr_min, ALIGN);

		/* They're used in order, but double-check... */
		assert(vring.desc[0].addr == (unsigned long)d);
		assert(vring.desc[1].addr == (unsigned long)(d+2));
		assert(vring.desc[2].addr == (unsigned long)data + 6);
		assert(vring.desc[3].addr == (unsigned long)(d+3));
		vring.desc[0].flags |= VRING_DESC_F_INDIRECT;
		vring.desc[1].flags |= VRING_DESC_F_INDIRECT;
		vring.desc[3].flags |= VRING_DESC_F_INDIRECT;

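		/*
		 * Payload layout: 1+2 bytes via the first indirect table,
		 * 3 via the second, 4 bytes directly, then 5+6+7 via the
		 * third, 28 bytes in all, matching the
		 * vringh_iov_pull_user() check below.
		 */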
		/* First indirect */
		d[0].addr = (unsigned long)data;
		d[0].len = 1;
		d[0].flags = VRING_DESC_F_NEXT;
		d[0].next = 1;
		d[1].addr = (unsigned long)data + 1;
		d[1].len = 2;
		d[1].flags = 0;

		/* Second indirect */
		d[2].addr = (unsigned long)data + 3;
		d[2].len = 3;
		d[2].flags = 0;

		/* Third indirect */
		d[3].addr = (unsigned long)data + 10;
		d[3].len = 5;
		d[3].flags = VRING_DESC_F_NEXT;
		d[3].next = 1;
		d[4].addr = (unsigned long)data + 15;
		d[4].len = 6;
		d[4].flags = VRING_DESC_F_NEXT;
		d[4].next = 2;
		d[5].addr = (unsigned long)data + 21;
		d[5].len = 7;
		d[5].flags = 0;

		/* Host picks it up (allocates new iov). */
		vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
		vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

		err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
		if (err != 1)
			errx(1, "vringh_getdesc_user: %i", err);

		if (head != 0)
			errx(1, "vringh_getdesc_user: head %i not 0", head);

		assert(riov.max_num & VRINGH_IOV_ALLOCATED);
		if (getrange != getrange_slow)
			assert(riov.used == 7);
		else
			assert(riov.used == 28);
		err = vringh_iov_pull_user(&riov, buf, 29);
		assert(err == 28);

		/* Data should be linear. */
		for (i = 0; i < err; i++)
			assert(buf[i] == i);
		vringh_iov_cleanup(&riov);
	}

	/* Don't leak memory... */
	vring_del_virtqueue(vq);
	free(__user_addr_min);

	return 0;
}