/* Simple test of virtio code, entirely in userspace. */
#define _GNU_SOURCE
#include <sched.h>
#include <err.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/virtio.h>
#include <linux/vringh.h>
#include <linux/virtio_ring.h>
#include <linux/uaccess.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <fcntl.h>

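/*
 * The <linux/...> headers above resolve to the userspace stubs shipped
 * alongside this test, so the vring/vringh kernel code runs unmodified
 * here.  __user_addr_min/__user_addr_max bound the region we treat as
 * "user" memory, __kmalloc_fake redirects the fake kmalloc into that
 * region, and user_addr_offset is the host-vs-guest address delta used
 * by the parallel test (0 in the single-process test).
 */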
#define USER_MEM (1024*1024)
void *__user_addr_min, *__user_addr_max;
void *__kmalloc_fake, *__kfree_ignore_start, *__kfree_ignore_end;
static u64 user_addr_offset;

#define RINGSIZE 256
#define ALIGN 4096

static void never_notify_host(struct virtqueue *vq)
{
        abort();
}

static void never_callback_guest(struct virtqueue *vq)
{
        abort();
}

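/*
 * getrange tells vringh which user addresses are valid and how to
 * translate them.  This fast variant reports the whole mapped region as
 * a single range; addresses the guest wrote into the ring are shifted
 * by user_addr_offset to reach the host-side mapping.
 */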
static bool getrange_iov(struct vringh *vrh, u64 addr, struct vringh_range *r)
{
        if (addr < (u64)(unsigned long)__user_addr_min - user_addr_offset)
                return false;
        if (addr >= (u64)(unsigned long)__user_addr_max - user_addr_offset)
                return false;

        r->start = (u64)(unsigned long)__user_addr_min - user_addr_offset;
        r->end_incl = (u64)(unsigned long)__user_addr_max - 1 - user_addr_offset;
        r->offset = user_addr_offset;
        return true;
}

/* We return single byte ranges. */
static bool getrange_slow(struct vringh *vrh, u64 addr, struct vringh_range *r)
{
        if (addr < (u64)(unsigned long)__user_addr_min - user_addr_offset)
                return false;
        if (addr >= (u64)(unsigned long)__user_addr_max - user_addr_offset)
                return false;

        r->start = addr;
        r->end_incl = r->start;
        r->offset = user_addr_offset;
        return true;
}

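/*
 * Wraps the virtio_device so parallel_notify_host() can recover the
 * notification pipe fd (and a notify counter) from the vq's vdev
 * pointer via container_of().
 */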
struct guest_virtio_device {
        struct virtio_device vdev;
        int to_host_fd;
        unsigned long notifies;
};

static void parallel_notify_host(struct virtqueue *vq)
{
        struct guest_virtio_device *gvdev;

        gvdev = container_of(vq->vdev, struct guest_virtio_device, vdev);
        write(gvdev->to_host_fd, "", 1);
        gvdev->notifies++;
}

static void no_notify_host(struct virtqueue *vq)
{
}

#define NUM_XFERS (10000000)

/* We aim for two "distant" cpus. */
static void find_cpus(unsigned int *first, unsigned int *last)
{
        unsigned int i;

        *first = -1U;
        *last = 0;
        for (i = 0; i < 4096; i++) {
                cpu_set_t set;
                CPU_ZERO(&set);
                CPU_SET(i, &set);
                if (sched_setaffinity(getpid(), sizeof(set), &set) == 0) {
                        if (i < *first)
                                *first = i;
                        if (i > *last)
                                *last = i;
                }
        }
}

/*
 * Opencoded version for fast mode: grab the next available head without
 * decoding its descriptors, mirroring the avail-ring handling inside
 * vringh_getdesc_user().  Returns 1 and sets *head, 0 if the ring is
 * empty, or a negative errno on access failure.
 */
static inline int vringh_get_head(struct vringh *vrh, u16 *head)
{
        u16 avail_idx, i;
        int err;

        err = get_user(avail_idx, &vrh->vring.avail->idx);
        if (err)
                return err;

        if (vrh->last_avail_idx == avail_idx)
                return 0;

        /* Only get avail ring entries after they have been exposed by guest. */
        virtio_rmb(vrh->weak_barriers);

        i = vrh->last_avail_idx & (vrh->vring.num - 1);

        err = get_user(*head, &vrh->vring.avail->ring[i]);
        if (err)
                return err;

        vrh->last_avail_idx++;
        return 1;
}

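/*
 * Fork a guest child and run host and guest on (ideally) different
 * CPUs.  Both map the same temp file, deliberately at different
 * addresses, so the getrange/offset translation is genuinely exercised.
 * Notifications travel over two pipes: the guest kicks to_host, the
 * host kicks to_guest when it has added used buffers.
 */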
static int parallel_test(unsigned long features,
                         bool (*getrange)(struct vringh *vrh,
                                          u64 addr, struct vringh_range *r),
                         bool fast_vringh)
{
        void *host_map, *guest_map;
        int fd, mapsize, to_guest[2], to_host[2];
        unsigned long xfers = 0, notifies = 0, receives = 0;
        unsigned int first_cpu, last_cpu;
        cpu_set_t cpu_set;
        char buf[128];

        /* Create real file to mmap. */
        fd = open("/tmp/vringh_test-file", O_RDWR|O_CREAT|O_TRUNC, 0600);
        if (fd < 0)
                err(1, "Opening /tmp/vringh_test-file");

        /* Extra room at the end for some data, and indirects */
        mapsize = vring_size(RINGSIZE, ALIGN)
                + RINGSIZE * 2 * sizeof(int)
                + RINGSIZE * 6 * sizeof(struct vring_desc);
        mapsize = (mapsize + getpagesize() - 1) & ~(getpagesize() - 1);
        ftruncate(fd, mapsize);

        /* Parent and child use separate addresses, to check our mapping logic! */
        host_map = mmap(NULL, mapsize, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
        guest_map = mmap(NULL, mapsize, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);

        pipe(to_guest);
        pipe(to_host);

        CPU_ZERO(&cpu_set);
        find_cpus(&first_cpu, &last_cpu);
        printf("Using CPUS %u and %u\n", first_cpu, last_cpu);
        fflush(stdout);

        if (fork() != 0) {
                struct vringh vrh;
                int status, err, rlen = 0;
                char rbuf[5];

                /* We are the host: never access guest addresses! */
                munmap(guest_map, mapsize);

                __user_addr_min = host_map;
                __user_addr_max = __user_addr_min + mapsize;
                user_addr_offset = host_map - guest_map;
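                /*
                 * Both mappings were created before the fork, so they
                 * are guaranteed distinct; the address translation
                 * relies on a nonzero offset.
                 */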
                assert(user_addr_offset);

                close(to_guest[0]);
                close(to_host[1]);

                vring_init(&vrh.vring, RINGSIZE, host_map, ALIGN);
                vringh_init_user(&vrh, features, RINGSIZE, true,
                                 vrh.vring.desc, vrh.vring.avail, vrh.vring.used);
                CPU_SET(first_cpu, &cpu_set);
                if (sched_setaffinity(getpid(), sizeof(cpu_set), &cpu_set))
                        errx(1, "Could not set affinity to cpu %u", first_cpu);

                while (xfers < NUM_XFERS) {
                        struct iovec host_riov[2], host_wiov[2];
                        struct vringh_iov riov, wiov;
                        u16 head, written;

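                        /*
                         * Fast mode: busy-wait for the next available
                         * head (flushing any pending used-buffer notify
                         * while the ring is empty), then complete it
                         * below without touching the descriptors.
                         */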
                        if (fast_vringh) {
                                for (;;) {
                                        err = vringh_get_head(&vrh, &head);
                                        if (err != 0)
                                                break;
                                        err = vringh_need_notify_user(&vrh);
                                        if (err < 0)
                                                errx(1, "vringh_need_notify_user: %i",
                                                     err);
                                        if (err) {
                                                write(to_guest[1], "", 1);
                                                notifies++;
                                        }
                                }
                                if (err != 1)
                                        errx(1, "vringh_get_head");
                                written = 0;
                                goto complete;
                        } else {
                                vringh_iov_init(&riov,
                                                host_riov,
                                                ARRAY_SIZE(host_riov));
                                vringh_iov_init(&wiov,
                                                host_wiov,
                                                ARRAY_SIZE(host_wiov));

                                err = vringh_getdesc_user(&vrh, &riov, &wiov,
                                                          getrange, &head);
                        }
                        if (err == 0) {
                                err = vringh_need_notify_user(&vrh);
                                if (err < 0)
                                        errx(1, "vringh_need_notify_user: %i",
                                             err);
                                if (err) {
                                        write(to_guest[1], "", 1);
                                        notifies++;
                                }

                                if (!vringh_notify_enable_user(&vrh))
                                        continue;

                                /* Swallow all notifies at once. */
                                if (read(to_host[0], buf, sizeof(buf)) < 1)
                                        break;

                                vringh_notify_disable_user(&vrh);
                                receives++;
                                continue;
                        }
                        if (err != 1)
                                errx(1, "vringh_getdesc_user: %i", err);

                        /* We simply copy bytes. */
                        if (riov.used) {
                                rlen = vringh_iov_pull_user(&riov, rbuf,
                                                            sizeof(rbuf));
                                if (rlen != 4)
                                        errx(1, "vringh_iov_pull_user: %i",
                                             rlen);
                                assert(riov.i == riov.used);
                                written = 0;
                        } else {
                                err = vringh_iov_push_user(&wiov, rbuf, rlen);
                                if (err != rlen)
                                        errx(1, "vringh_iov_push_user: %i",
                                             err);
                                assert(wiov.i == wiov.used);
                                written = err;
                        }
                complete:
                        xfers++;

                        err = vringh_complete_user(&vrh, head, written);
                        if (err != 0)
                                errx(1, "vringh_complete_user: %i", err);
                }

                err = vringh_need_notify_user(&vrh);
                if (err < 0)
                        errx(1, "vringh_need_notify_user: %i", err);
                if (err) {
                        write(to_guest[1], "", 1);
                        notifies++;
                }
                wait(&status);
                if (!WIFEXITED(status))
                        errx(1, "Child died with signal %i?", WTERMSIG(status));
                if (WEXITSTATUS(status) != 0)
                        errx(1, "Child exited %i?", WEXITSTATUS(status));
                printf("Host: notified %lu, pinged %lu\n", notifies, receives);
                return 0;
        } else {
                struct guest_virtio_device gvdev;
                struct virtqueue *vq;
                unsigned int *data;
                struct vring_desc *indirects;
                unsigned int finished = 0;

                /* We pass sg[]s pointing into here, but we need RINGSIZE+1 */
                data = guest_map + vring_size(RINGSIZE, ALIGN);
                indirects = (void *)data + (RINGSIZE + 1) * 2 * sizeof(int);
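                /*
                 * That carves the area past the ring into an int
                 * payload array (RINGSIZE+1 slots, so a buffer can be
                 * in flight while every ring entry is busy) followed
                 * by room for the indirect descriptor tables.
                 */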

                /* We are the guest. */
                munmap(host_map, mapsize);

                close(to_guest[1]);
                close(to_host[0]);

                gvdev.vdev.features[0] = features;
                gvdev.to_host_fd = to_host[1];
                gvdev.notifies = 0;

                CPU_SET(last_cpu, &cpu_set);
                if (sched_setaffinity(getpid(), sizeof(cpu_set), &cpu_set))
                        err(1, "Could not set affinity to cpu %u", last_cpu);

                vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &gvdev.vdev, true,
                                         guest_map, fast_vringh ? no_notify_host
                                         : parallel_notify_host,
                                         never_callback_guest, "guest vq");

                /* Don't kfree indirects. */
                __kfree_ignore_start = indirects;
                __kfree_ignore_end = indirects + RINGSIZE * 6;

                while (xfers < NUM_XFERS) {
                        struct scatterlist sg[4];
                        unsigned int num_sg, len;
                        int *dbuf, err;
                        bool output = !(xfers % 2);

                        /* Consume bufs. */
                        while ((dbuf = virtqueue_get_buf(vq, &len)) != NULL) {
                                if (len == 4)
                                        assert(*dbuf == finished - 1);
                                else if (!fast_vringh)
                                        assert(*dbuf == finished);
                                finished++;
                        }

                        /* Produce a buffer. */
                        dbuf = data + (xfers % (RINGSIZE + 1));

                        if (output)
                                *dbuf = xfers;
                        else
                                *dbuf = -1;

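                        /*
                         * Cycle through four sg layouts (changing every
                         * sizeof(int) xfers) so 1-, 2-, 3- and
                         * 4-element chains all get exercised.
                         */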
                        switch ((xfers / sizeof(*dbuf)) % 4) {
                        case 0:
                                /* Nasty three-element sg list. */
                                sg_init_table(sg, num_sg = 3);
                                sg_set_buf(&sg[0], (void *)dbuf, 1);
                                sg_set_buf(&sg[1], (void *)dbuf + 1, 2);
                                sg_set_buf(&sg[2], (void *)dbuf + 3, 1);
                                break;
                        case 1:
                                sg_init_table(sg, num_sg = 2);
                                sg_set_buf(&sg[0], (void *)dbuf, 1);
                                sg_set_buf(&sg[1], (void *)dbuf + 1, 3);
                                break;
                        case 2:
                                sg_init_table(sg, num_sg = 1);
                                sg_set_buf(&sg[0], (void *)dbuf, 4);
                                break;
                        case 3:
                                sg_init_table(sg, num_sg = 4);
                                sg_set_buf(&sg[0], (void *)dbuf, 1);
                                sg_set_buf(&sg[1], (void *)dbuf + 1, 1);
                                sg_set_buf(&sg[2], (void *)dbuf + 2, 1);
                                sg_set_buf(&sg[3], (void *)dbuf + 3, 1);
                                break;
                        }

                        /* May allocate an indirect, so force it to allocate
                         * user addr */
                        __kmalloc_fake = indirects + (xfers % RINGSIZE) * 4;
                        if (output)
                                err = virtqueue_add_outbuf(vq, sg, num_sg, dbuf,
                                                           GFP_KERNEL);
                        else
                                err = virtqueue_add_inbuf(vq, sg, num_sg,
                                                          dbuf, GFP_KERNEL);

                        if (err == -ENOSPC) {
                                if (!virtqueue_enable_cb_delayed(vq))
                                        continue;
                                /* Swallow all notifies at once. */
                                if (read(to_guest[0], buf, sizeof(buf)) < 1)
                                        break;

                                receives++;
                                virtqueue_disable_cb(vq);
                                continue;
                        }

                        if (err)
                                errx(1, "virtqueue_add_in/outbuf: %i", err);

                        xfers++;
                        virtqueue_kick(vq);
                }

                /* Any extra? */
                while (finished != xfers) {
                        int *dbuf;
                        unsigned int len;

                        /* Consume bufs. */
                        dbuf = virtqueue_get_buf(vq, &len);
                        if (dbuf) {
                                if (len == 4)
                                        assert(*dbuf == finished - 1);
                                else
                                        assert(len == 0);
                                finished++;
                                continue;
                        }

                        if (!virtqueue_enable_cb_delayed(vq))
                                continue;
                        if (read(to_guest[0], buf, sizeof(buf)) < 1)
                                break;

                        receives++;
                        virtqueue_disable_cb(vq);
                }

                printf("Guest: notified %lu, pinged %lu\n",
                       gvdev.notifies, receives);
                vring_del_virtqueue(vq);
                return 0;
        }
}

int main(int argc, char *argv[])
{
        struct virtio_device vdev;
        struct virtqueue *vq;
        struct vringh vrh;
        struct scatterlist guest_sg[RINGSIZE], *sgs[2];
        struct iovec host_riov[2], host_wiov[2];
        struct vringh_iov riov, wiov;
        struct vring_used_elem used[RINGSIZE];
        char buf[28];
        u16 head;
        int err;
        unsigned i;
        void *ret;
        bool (*getrange)(struct vringh *vrh, u64 addr, struct vringh_range *r);
        bool fast_vringh = false, parallel = false;

        getrange = getrange_iov;
        vdev.features[0] = 0;

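        /*
         * Flags: --indirect and --eventidx set ring features,
         * --slow-range forces byte-at-a-time range lookups,
         * --fast-vringh skips descriptor decoding in the parallel host,
         * and --parallel runs the forked host/guest stress test, e.g.
         * "vringh_test --parallel --eventidx" (binary name assumed from
         * the usual tools/virtio build).
         */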
        while (argv[1]) {
                if (strcmp(argv[1], "--indirect") == 0)
                        vdev.features[0] |= (1 << VIRTIO_RING_F_INDIRECT_DESC);
                else if (strcmp(argv[1], "--eventidx") == 0)
                        vdev.features[0] |= (1 << VIRTIO_RING_F_EVENT_IDX);
                else if (strcmp(argv[1], "--slow-range") == 0)
                        getrange = getrange_slow;
                else if (strcmp(argv[1], "--fast-vringh") == 0)
                        fast_vringh = true;
                else if (strcmp(argv[1], "--parallel") == 0)
                        parallel = true;
                else
                        errx(1, "Unknown arg %s", argv[1]);
                argv++;
        }

        if (parallel)
                return parallel_test(vdev.features[0], getrange, fast_vringh);

        if (posix_memalign(&__user_addr_min, PAGE_SIZE, USER_MEM) != 0)
                abort();
        __user_addr_max = __user_addr_min + USER_MEM;
        memset(__user_addr_min, 0, vring_size(RINGSIZE, ALIGN));

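        /*
         * Single-process mode: guest (virtqueue) and host (vringh)
         * share this one allocation at the same address, so
         * user_addr_offset stays 0 and every step can be checked
         * synchronously.
         */
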
        /* Set up guest side. */
        vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &vdev, true,
                                 __user_addr_min,
                                 never_notify_host, never_callback_guest,
                                 "guest vq");

        /* Set up host side. */
        vring_init(&vrh.vring, RINGSIZE, __user_addr_min, ALIGN);
        vringh_init_user(&vrh, vdev.features[0], RINGSIZE, true,
                         vrh.vring.desc, vrh.vring.avail, vrh.vring.used);

        /* No descriptor to get yet... */
        err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
        if (err != 0)
                errx(1, "vringh_getdesc_user: %i", err);

        /* Guest puts in a descriptor. */
        memcpy(__user_addr_max - 1, "a", 1);
        sg_init_table(guest_sg, 1);
        sg_set_buf(&guest_sg[0], __user_addr_max - 1, 1);
        sg_init_table(guest_sg+1, 1);
        sg_set_buf(&guest_sg[1], __user_addr_max - 3, 2);
        sgs[0] = &guest_sg[0];
        sgs[1] = &guest_sg[1];

        /* May allocate an indirect, so force it to allocate user addr */
        __kmalloc_fake = __user_addr_min + vring_size(RINGSIZE, ALIGN);
        err = virtqueue_add_sgs(vq, sgs, 1, 1, &err, GFP_KERNEL);
        if (err)
                errx(1, "virtqueue_add_sgs: %i", err);
        __kmalloc_fake = NULL;

        /* Host retrieves it. */
        vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
        vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

        err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
        if (err != 1)
                errx(1, "vringh_getdesc_user: %i", err);

        assert(riov.used == 1);
        assert(riov.iov[0].iov_base == __user_addr_max - 1);
        assert(riov.iov[0].iov_len == 1);
        if (getrange != getrange_slow) {
                assert(wiov.used == 1);
                assert(wiov.iov[0].iov_base == __user_addr_max - 3);
                assert(wiov.iov[0].iov_len == 2);
        } else {
                assert(wiov.used == 2);
                assert(wiov.iov[0].iov_base == __user_addr_max - 3);
                assert(wiov.iov[0].iov_len == 1);
                assert(wiov.iov[1].iov_base == __user_addr_max - 2);
                assert(wiov.iov[1].iov_len == 1);
        }

        err = vringh_iov_pull_user(&riov, buf, 5);
        if (err != 1)
                errx(1, "vringh_iov_pull_user: %i", err);
        assert(buf[0] == 'a');
        assert(riov.i == 1);
        assert(vringh_iov_pull_user(&riov, buf, 5) == 0);

        memcpy(buf, "bcdef", 5);
        err = vringh_iov_push_user(&wiov, buf, 5);
        if (err != 2)
                errx(1, "vringh_iov_push_user: %i", err);
        assert(memcmp(__user_addr_max - 3, "bc", 2) == 0);
        assert(wiov.i == wiov.used);
        assert(vringh_iov_push_user(&wiov, buf, 5) == 0);

        /* Host is done. */
        err = vringh_complete_user(&vrh, head, err);
        if (err != 0)
                errx(1, "vringh_complete_user: %i", err);

        /* Guest should see used token now. */
        __kfree_ignore_start = __user_addr_min + vring_size(RINGSIZE, ALIGN);
        __kfree_ignore_end = __kfree_ignore_start + 1;
        ret = virtqueue_get_buf(vq, &i);
        if (ret != &err)
                errx(1, "virtqueue_get_buf: %p", ret);
        assert(i == 2);

        /* Guest puts in a huge descriptor. */
        sg_init_table(guest_sg, RINGSIZE);
        for (i = 0; i < RINGSIZE; i++) {
                sg_set_buf(&guest_sg[i],
                           __user_addr_max - USER_MEM/4, USER_MEM/4);
        }

        /* Fill contents with recognisable garbage. */
        for (i = 0; i < USER_MEM/4; i++)
                ((char *)__user_addr_max - USER_MEM/4)[i] = i;

        /* This will allocate an indirect, so force it to allocate user addr */
        __kmalloc_fake = __user_addr_min + vring_size(RINGSIZE, ALIGN);
        err = virtqueue_add_outbuf(vq, guest_sg, RINGSIZE, &err, GFP_KERNEL);
        if (err)
                errx(1, "virtqueue_add_outbuf (large): %i", err);
        __kmalloc_fake = NULL;

        /* Host picks it up (allocates new iov). */
        vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
        vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

        err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
        if (err != 1)
                errx(1, "vringh_getdesc_user: %i", err);

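        /*
         * RINGSIZE entries can't fit in the two-element host_riov, so
         * vringh fell back to a heap-allocated iov array; the flag is
         * carried in max_num.
         */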
        assert(riov.max_num & VRINGH_IOV_ALLOCATED);
        assert(riov.iov != host_riov);
        if (getrange != getrange_slow)
                assert(riov.used == RINGSIZE);
        else
                assert(riov.used == RINGSIZE * USER_MEM/4);

        assert(!(wiov.max_num & VRINGH_IOV_ALLOCATED));
        assert(wiov.used == 0);

        /* Pull data back out (in odd chunks), should be as expected. */
        for (i = 0; i < RINGSIZE * USER_MEM/4; i += 3) {
                err = vringh_iov_pull_user(&riov, buf, 3);
                if (err != 3 && i + err != RINGSIZE * USER_MEM/4)
                        errx(1, "vringh_iov_pull_user large: %i", err);
                assert(buf[0] == (char)i);
                assert(err < 2 || buf[1] == (char)(i + 1));
                assert(err < 3 || buf[2] == (char)(i + 2));
        }
        assert(riov.i == riov.used);
        vringh_iov_cleanup(&riov);
        vringh_iov_cleanup(&wiov);

        /* Complete using multi interface, just because we can. */
        used[0].id = head;
        used[0].len = 0;
        err = vringh_complete_multi_user(&vrh, used, 1);
        if (err)
                errx(1, "vringh_complete_multi_user(1): %i", err);

        /* Free up those descriptors. */
        ret = virtqueue_get_buf(vq, &i);
        if (ret != &err)
                errx(1, "virtqueue_get_buf: %p", ret);

        /* Add lots of descriptors. */
        sg_init_table(guest_sg, 1);
        sg_set_buf(&guest_sg[0], __user_addr_max - 1, 1);
        for (i = 0; i < RINGSIZE; i++) {
                err = virtqueue_add_outbuf(vq, guest_sg, 1, &err, GFP_KERNEL);
                if (err)
                        errx(1, "virtqueue_add_outbuf (multiple): %i", err);
        }

        /* Now get many, and consume them all at once. */
        vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
        vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

        for (i = 0; i < RINGSIZE; i++) {
                err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
                if (err != 1)
                        errx(1, "vringh_getdesc_user: %i", err);
                used[i].id = head;
                used[i].len = 0;
        }
        /* Make sure it wraps around ring, to test! */
        assert(vrh.vring.used->idx % RINGSIZE != 0);
        err = vringh_complete_multi_user(&vrh, used, RINGSIZE);
        if (err)
                errx(1, "vringh_complete_multi_user: %i", err);

        /* Free those buffers. */
        for (i = 0; i < RINGSIZE; i++) {
                unsigned len;
                assert(virtqueue_get_buf(vq, &len) != NULL);
        }

        /* Test weird (but legal!) indirect. */
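        /*
         * Build a plain 4-descriptor chain, then hand-flip INDIRECT on
         * descriptors 0, 1 and 3 so they point at tables of 2, 1 and 3
         * descriptors respectively; total payload is 3 + 3 + 4 + 18 =
         * 28 bytes, laid out linearly in data[].
         */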
        if (vdev.features[0] & (1 << VIRTIO_RING_F_INDIRECT_DESC)) {
                char *data = __user_addr_max - USER_MEM/4;
                struct vring_desc *d = __user_addr_max - USER_MEM/2;
                struct vring vring;

                /* Force creation of direct, which we modify. */
                vdev.features[0] &= ~(1 << VIRTIO_RING_F_INDIRECT_DESC);
                vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &vdev, true,
                                         __user_addr_min,
                                         never_notify_host,
                                         never_callback_guest,
                                         "guest vq");

                sg_init_table(guest_sg, 4);
                sg_set_buf(&guest_sg[0], d, sizeof(*d)*2);
                sg_set_buf(&guest_sg[1], d + 2, sizeof(*d)*1);
                sg_set_buf(&guest_sg[2], data + 6, 4);
                sg_set_buf(&guest_sg[3], d + 3, sizeof(*d)*3);

                err = virtqueue_add_outbuf(vq, guest_sg, 4, &err, GFP_KERNEL);
                if (err)
                        errx(1, "virtqueue_add_outbuf (indirect): %i", err);

                vring_init(&vring, RINGSIZE, __user_addr_min, ALIGN);

                /* They're used in order, but double-check... */
                assert(vring.desc[0].addr == (unsigned long)d);
                assert(vring.desc[1].addr == (unsigned long)(d+2));
                assert(vring.desc[2].addr == (unsigned long)data + 6);
                assert(vring.desc[3].addr == (unsigned long)(d+3));
                vring.desc[0].flags |= VRING_DESC_F_INDIRECT;
                vring.desc[1].flags |= VRING_DESC_F_INDIRECT;
                vring.desc[3].flags |= VRING_DESC_F_INDIRECT;

                /* First indirect */
                d[0].addr = (unsigned long)data;
                d[0].len = 1;
                d[0].flags = VRING_DESC_F_NEXT;
                d[0].next = 1;
                d[1].addr = (unsigned long)data + 1;
                d[1].len = 2;
                d[1].flags = 0;

                /* Second indirect */
                d[2].addr = (unsigned long)data + 3;
                d[2].len = 3;
                d[2].flags = 0;

                /* Third indirect */
                d[3].addr = (unsigned long)data + 10;
                d[3].len = 5;
                d[3].flags = VRING_DESC_F_NEXT;
                d[3].next = 1;
                d[4].addr = (unsigned long)data + 15;
                d[4].len = 6;
                d[4].flags = VRING_DESC_F_NEXT;
                d[4].next = 2;
                d[5].addr = (unsigned long)data + 21;
                d[5].len = 7;
                d[5].flags = 0;

                /* Host picks it up (allocates new iov). */
                vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
                vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

                err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
                if (err != 1)
                        errx(1, "vringh_getdesc_user: %i", err);

                if (head != 0)
                        errx(1, "vringh_getdesc_user: head %i not 0", head);

                assert(riov.max_num & VRINGH_IOV_ALLOCATED);
                if (getrange != getrange_slow)
                        assert(riov.used == 7);
                else
                        assert(riov.used == 28);
                err = vringh_iov_pull_user(&riov, buf, 29);
                assert(err == 28);

                /* Data should be linear. */
                for (i = 0; i < err; i++)
                        assert(buf[i] == i);
                vringh_iov_cleanup(&riov);
        }

        /* Don't leak memory... */
        vring_del_virtqueue(vq);
        free(__user_addr_min);

        return 0;
}