drivers/vhost/test.c — host-kernel-side virtio test server (vhost test module)
[platform/adaptation/renesas_rcar/renesas_kernel.git] / drivers / vhost / test.c
1 /* Copyright (C) 2009 Red Hat, Inc.
2  * Author: Michael S. Tsirkin <mst@redhat.com>
3  *
4  * This work is licensed under the terms of the GNU GPL, version 2.
5  *
6  * test virtio server in host kernel.
7  */
8
9 #include <linux/compat.h>
10 #include <linux/eventfd.h>
11 #include <linux/vhost.h>
12 #include <linux/miscdevice.h>
13 #include <linux/module.h>
14 #include <linux/mutex.h>
15 #include <linux/workqueue.h>
16 #include <linux/file.h>
17 #include <linux/slab.h>
18
19 #include "test.h"
20 #include "vhost.h"
21
/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_TEST_WEIGHT 0x80000
25
/* Virtqueue indices: the test device exposes exactly one vq. */
enum {
	VHOST_TEST_VQ = 0,
	VHOST_TEST_VQ_MAX = 1,
};
30
/* Per-open-file device state: the generic vhost device plus the
 * backing storage for its (single) virtqueue. */
struct vhost_test {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
};
35
36 /* Expects to be always run from workqueue - which acts as
37  * read-size critical section for our kind of RCU. */
38 static void handle_vq(struct vhost_test *n)
39 {
40         struct vhost_virtqueue *vq = &n->vqs[VHOST_TEST_VQ];
41         unsigned out, in;
42         int head;
43         size_t len, total_len = 0;
44         void *private;
45
46         mutex_lock(&vq->mutex);
47         private = vq->private_data;
48         if (!private) {
49                 mutex_unlock(&vq->mutex);
50                 return;
51         }
52
53         vhost_disable_notify(&n->dev, vq);
54
55         for (;;) {
56                 head = vhost_get_vq_desc(&n->dev, vq, vq->iov,
57                                          ARRAY_SIZE(vq->iov),
58                                          &out, &in,
59                                          NULL, NULL);
60                 /* On error, stop handling until the next kick. */
61                 if (unlikely(head < 0))
62                         break;
63                 /* Nothing new?  Wait for eventfd to tell us they refilled. */
64                 if (head == vq->num) {
65                         if (unlikely(vhost_enable_notify(&n->dev, vq))) {
66                                 vhost_disable_notify(&n->dev, vq);
67                                 continue;
68                         }
69                         break;
70                 }
71                 if (in) {
72                         vq_err(vq, "Unexpected descriptor format for TX: "
73                                "out %d, int %d\n", out, in);
74                         break;
75                 }
76                 len = iov_length(vq->iov, out);
77                 /* Sanity check */
78                 if (!len) {
79                         vq_err(vq, "Unexpected 0 len for TX\n");
80                         break;
81                 }
82                 vhost_add_used_and_signal(&n->dev, vq, head, 0);
83                 total_len += len;
84                 if (unlikely(total_len >= VHOST_TEST_WEIGHT)) {
85                         vhost_poll_queue(&vq->poll);
86                         break;
87                 }
88         }
89
90         mutex_unlock(&vq->mutex);
91 }
92
/* Work-queue callback: recover our device from the embedded
 * vhost_work and process the virtqueue. */
static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_test *n = container_of(vq->dev, struct vhost_test, dev);

	handle_vq(n);
}
101
102 static int vhost_test_open(struct inode *inode, struct file *f)
103 {
104         struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL);
105         struct vhost_dev *dev;
106         struct vhost_virtqueue **vqs;
107
108         if (!n)
109                 return -ENOMEM;
110         vqs = kmalloc(VHOST_TEST_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
111         if (!vqs) {
112                 kfree(n);
113                 return -ENOMEM;
114         }
115
116         dev = &n->dev;
117         vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
118         n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
119         vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);
120
121         f->private_data = n;
122
123         return 0;
124 }
125
126 static void *vhost_test_stop_vq(struct vhost_test *n,
127                                 struct vhost_virtqueue *vq)
128 {
129         void *private;
130
131         mutex_lock(&vq->mutex);
132         private = vq->private_data;
133         vq->private_data = NULL;
134         mutex_unlock(&vq->mutex);
135         return private;
136 }
137
138 static void vhost_test_stop(struct vhost_test *n, void **privatep)
139 {
140         *privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ);
141 }
142
/* Wait for all queued work on one virtqueue to finish. */
static void vhost_test_flush_vq(struct vhost_test *n, int index)
{
	vhost_poll_flush(&n->vqs[index].poll);
}
147
/* Flush outstanding work on every virtqueue (there is only one). */
static void vhost_test_flush(struct vhost_test *n)
{
	vhost_test_flush_vq(n, VHOST_TEST_VQ);
}
152
153 static int vhost_test_release(struct inode *inode, struct file *f)
154 {
155         struct vhost_test *n = f->private_data;
156         void  *private;
157
158         vhost_test_stop(n, &private);
159         vhost_test_flush(n);
160         vhost_dev_cleanup(&n->dev, false);
161         /* We do an extra flush before freeing memory,
162          * since jobs can re-queue themselves. */
163         vhost_test_flush(n);
164         kfree(n);
165         return 0;
166 }
167
/* VHOST_TEST_RUN ioctl backend: start (test == 1) or stop (test == 0)
 * the device by installing/clearing each vq's private_data.
 * Returns 0 on success, -EINVAL for an out-of-range test value,
 * -EFAULT if a ring is not set up correctly, or an error from
 * vhost_dev_check_owner()/vhost_init_used(). */
static long vhost_test_run(struct vhost_test *n, int test)
{
	void *priv, *oldpriv;
	struct vhost_virtqueue *vq;
	int r, index;

	/* Only 0 (stop) and 1 (run) are meaningful. */
	if (test < 0 || test > 1)
		return -EINVAL;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	for (index = 0; index < n->dev.nvqs; ++index) {
		/* Verify that ring has been setup correctly. */
		if (!vhost_vq_access_ok(&n->vqs[index])) {
			r = -EFAULT;
			goto err;
		}
	}

	for (index = 0; index < n->dev.nvqs; ++index) {
		vq = n->vqs + index;
		mutex_lock(&vq->mutex);
		/* Running uses the device itself as the backend token;
		 * stopping installs NULL. */
		priv = test ? n : NULL;

		/* start polling new socket */
		oldpriv = vq->private_data;
		vq->private_data = priv;

		r = vhost_init_used(&n->vqs[index]);

		mutex_unlock(&vq->mutex);

		if (r)
			goto err;

		/* Wait out any in-flight handle_vq() that still saw the
		 * old private_data. */
		if (oldpriv) {
			vhost_test_flush_vq(n, index);
		}
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err:
	mutex_unlock(&n->dev.mutex);
	return r;
}
218
/* VHOST_RESET_OWNER ioctl backend: stop the device, drain work, and
 * return the vhost_dev to an unowned state so another process can
 * take it over. */
static long vhost_test_reset_owner(struct vhost_test *n)
{
	void *priv = NULL;
	long err;
	struct vhost_memory *memory;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	/* Allocate the replacement memory table up front so the reset
	 * cannot fail after the device has already been stopped. */
	memory = vhost_dev_reset_owner_prepare();
	if (!memory) {
		err = -ENOMEM;
		goto done;
	}
	vhost_test_stop(n, &priv);
	vhost_test_flush(n);
	vhost_dev_reset_owner(&n->dev, memory);
done:
	mutex_unlock(&n->dev.mutex);
	return err;
}
241
/* VHOST_SET_FEATURES ioctl backend.  Returns 0 or -EFAULT when
 * VHOST_F_LOG_ALL is requested but the log area is not accessible. */
static int vhost_test_set_features(struct vhost_test *n, u64 features)
{
	mutex_lock(&n->dev.mutex);
	/* Enabling dirty logging requires a usable log region. */
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev)) {
		mutex_unlock(&n->dev.mutex);
		return -EFAULT;
	}
	n->dev.acked_features = features;
	/* Publish the new acked_features before flushing; presumably
	 * workers read it without taking dev.mutex - confirm against
	 * the vhost core. */
	smp_wmb();
	vhost_test_flush(n);
	mutex_unlock(&n->dev.mutex);
	return 0;
}
256
/* Main ioctl dispatcher for /dev/vhost-test.  Test-specific commands
 * are handled inline; anything else falls through to the generic
 * vhost_dev_ioctl()/vhost_vring_ioctl() handlers. */
static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
			     unsigned long arg)
{
	struct vhost_test *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	int test;
	u64 features;
	int r;
	switch (ioctl) {
	case VHOST_TEST_RUN:
		if (copy_from_user(&test, argp, sizeof test))
			return -EFAULT;
		return vhost_test_run(n, test);
	case VHOST_GET_FEATURES:
		features = VHOST_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		/* Refuse feature bits we do not know about. */
		if (features & ~VHOST_FEATURES)
			return -EOPNOTSUPP;
		return vhost_test_set_features(n, features);
	case VHOST_RESET_OWNER:
		return vhost_test_reset_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
		/* Generic ioctls may have changed vq state; flush work
		 * started under the old state before returning. */
		vhost_test_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}
294
#ifdef CONFIG_COMPAT
/* 32-bit-compat ioctl entry: widen the user pointer and reuse the
 * native handler. */
static long vhost_test_compat_ioctl(struct file *f, unsigned int ioctl,
				   unsigned long arg)
{
	return vhost_test_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif
302
/* File operations backing the /dev/vhost-test misc device. */
static const struct file_operations vhost_test_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_test_release,
	.unlocked_ioctl = vhost_test_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = vhost_test_compat_ioctl,
#endif
	.open           = vhost_test_open,
	.llseek         = noop_llseek,
};
313
314 static struct miscdevice vhost_test_misc = {
315         MISC_DYNAMIC_MINOR,
316         "vhost-test",
317         &vhost_test_fops,
318 };
319
/* Module init: register the misc device; returns misc_register()'s
 * result (0 on success). */
static int vhost_test_init(void)
{
	return misc_register(&vhost_test_misc);
}
module_init(vhost_test_init);
325
/* Module exit: unregister the misc device. */
static void vhost_test_exit(void)
{
	misc_deregister(&vhost_test_misc);
}
module_exit(vhost_test_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel side for virtio simulator");