#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>
struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);

struct vhost_work {
	struct list_head node;
	vhost_work_fn_t fn;
	wait_queue_head_t done;
	int flushing;
	unsigned queue_seq;
	unsigned done_seq;
};
/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
	poll_table table;
	wait_queue_head_t *wqh;
	wait_queue_t wait;
	struct vhost_work work;
	unsigned long mask;
	struct vhost_dev *dev;
};
void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);

void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     unsigned long mask, struct vhost_dev *dev);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_flush(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);
void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work);
long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp);
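
/*
 * Usage sketch (illustrative only, not part of this header): a backend such
 * as drivers/vhost/net.c pairs each virtqueue with a vhost_poll whose mask
 * selects the poll events it cares about. The names handle_tx_kick, dev, vq
 * and sock below are hypothetical stand-ins.
 *
 *	vhost_poll_init(&vq->poll, handle_tx_kick, POLLOUT, &dev);
 *	...
 *	vhost_poll_start(&vq->poll, sock->file);	// arm on the backend fd
 *	vhost_poll_queue(&vq->poll);			// queue work to the worker by hand
 *	vhost_poll_flush(&vq->poll);			// wait for queued work to finish
 *	vhost_poll_stop(&vq->poll);			// detach from the wait queue
 */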
struct vhost_virtqueue;

/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
	struct vhost_dev *dev;

	/* The actual ring of buffers. */
	struct mutex mutex;
	unsigned int num;
	struct vring_desc __user *desc;
	struct vring_avail __user *avail;
	struct vring_used __user *used;
	struct eventfd_ctx *call_ctx;
	struct eventfd_ctx *error_ctx;
	struct eventfd_ctx *log_ctx;

	struct vhost_poll poll;

	/* The routine to call when the Guest pings us, or timeout. */
	vhost_work_fn_t handle_kick;
	/* Last available index we saw. */
	u16 last_avail_idx;

	/* Caches available index value from user. */
	u16 avail_idx;

	/* Last index we used. */
	u16 last_used_idx;

	/* Last used index value we have signalled on */
	u16 signalled_used;

	/* Whether signalled_used above is valid. */
	bool signalled_used_valid;
	/* Log writes to used structure. */
	bool log_used;
	u64 log_addr;

	struct iovec iov[UIO_MAXIOV];
	struct iovec *indirect;
	struct vring_used_elem *heads;
	/* We use a kind of RCU to access private pointer.
	 * All readers access it from worker, which makes it possible to
	 * flush the vhost_work instead of synchronize_rcu. Therefore readers do
	 * not need to call rcu_read_lock/rcu_read_unlock: the beginning of
	 * vhost_work execution acts instead of rcu_read_lock() and the end of
	 * vhost_work execution acts instead of rcu_read_unlock().
	 * Writers use virtqueue mutex. */
	void __rcu *private_data;
	/* Log write descriptors */
	void __user *log_base;
	struct vhost_log *log;
};
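
/*
 * Writer-side sketch of the private_data scheme described above
 * (illustrative only; the backend-swap code lives in callers such as
 * drivers/vhost/net.c, and oldsock/newsock are hypothetical backends):
 *
 *	mutex_lock(&vq->mutex);
 *	oldsock = rcu_dereference_protected(vq->private_data,
 *					    lockdep_is_held(&vq->mutex));
 *	rcu_assign_pointer(vq->private_data, newsock);
 *	mutex_unlock(&vq->mutex);
 *	vhost_poll_flush(&vq->poll);	// stands in for synchronize_rcu()
 *	// oldsock is now unreachable from the worker and can be released
 */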
struct vhost_dev {
	/* Readers use RCU to access memory table pointer
	 * log base pointer and features.
	 * Writers use mutex below. */
	struct vhost_memory __rcu *memory;
	struct mm_struct *mm;
	struct mutex mutex;
	unsigned acked_features;
	struct vhost_virtqueue **vqs;
	int nvqs;
	struct file *log_file;
	struct eventfd_ctx *log_ctx;
	spinlock_t work_lock;
	struct list_head work_list;
	struct task_struct *worker;
};
long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
struct vhost_memory *vhost_dev_reset_owner_prepare(void);
void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_memory *);
void vhost_dev_cleanup(struct vhost_dev *, bool locked);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp);
int vhost_vq_access_ok(struct vhost_virtqueue *vq);
int vhost_log_access_ok(struct vhost_dev *);
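
/*
 * Lifecycle sketch (illustrative only): a vhost device driver typically
 * wires these helpers into its char-device file operations, roughly as
 * vhost-net does; "n" and NVQS below are hypothetical.
 *
 *	open:    vhost_dev_init(&n->dev, n->vqs, NVQS);
 *	ioctl:   r = vhost_dev_ioctl(&n->dev, ioctl, argp);
 *	         if (r == -ENOIOCTLCMD)
 *	                 r = vhost_vring_ioctl(&n->dev, ioctl, argp);
 *	release: vhost_dev_stop(&n->dev);
 *	         vhost_dev_cleanup(&n->dev, false);
 */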
int vhost_get_vq_desc(struct vhost_dev *, struct vhost_virtqueue *,
		      struct iovec iov[], unsigned int iov_count,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);

int vhost_init_used(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
		     unsigned count);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
			       unsigned int id, int len);
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
				 struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len);
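
/*
 * Per-virtqueue processing sketch (illustrative only): a handle_kick work
 * function usually loops over vhost_get_vq_desc(), consumes the returned
 * iovecs, and completes buffers with vhost_add_used_and_signal(). The
 * do_backend_io() helper and the dev/vq variables are hypothetical.
 *
 *	vhost_disable_notify(dev, vq);		// suppress guest kicks while polling
 *	for (;;) {
 *		head = vhost_get_vq_desc(dev, vq, vq->iov, ARRAY_SIZE(vq->iov),
 *					 &out, &in, NULL, NULL);
 *		if (unlikely(head < 0))
 *			break;			// e.g. malformed descriptor
 *		if (head == vq->num) {		// ring drained
 *			if (unlikely(vhost_enable_notify(dev, vq))) {
 *				vhost_disable_notify(dev, vq);
 *				continue;	// new buffers raced in, keep going
 *			}
 *			break;
 *		}
 *		len = do_backend_io(vq->iov, out, in);
 *		vhost_add_used_and_signal(dev, vq, head, len);
 *	}
 */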
#define vq_err(vq, fmt, ...) do {                                  \
		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);              \
		if ((vq)->error_ctx)                               \
			eventfd_signal((vq)->error_ctx, 1);        \
	} while (0)
enum {
	VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
			 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
			 (1ULL << VIRTIO_RING_F_EVENT_IDX) |
			 (1ULL << VHOST_F_LOG_ALL),
};
static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
{
	unsigned acked_features;

	/* TODO: check that we are running from vhost_worker or dev mutex is
	 * held? */
	acked_features = rcu_dereference_index_check(dev->acked_features, 1);
	return acked_features & (1 << bit);
}
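
/*
 * Usage sketch (illustrative only): callers test negotiated bits before
 * taking feature-dependent paths, e.g. only logging writes when dirty
 * logging was requested; vq, log_num and len below are hypothetical.
 *
 *	if (vhost_has_feature(dev, VHOST_F_LOG_ALL))
 *		vhost_log_write(vq, vq->log, log_num, len);
 */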