/******************************************************************************
 * Driver for receiving and demuxing event-channel signals.
 *
 * Copyright (c) 2004-2005, K A Fraser
 * Multi-process extensions Copyright (c) 2004, Steven Smith
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
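/*
 * Illustrative sketch of how a userspace client typically drives this device
 * (not code from this driver; it assumes the ioctl structures declared in the
 * UAPI header xen/evtchn.h and a remote domain id known out of band):
 *
 *      int fd = open("/dev/xen/evtchn", O_RDWR);
 *
 *      struct ioctl_evtchn_bind_unbound_port bind = {
 *              .remote_domain = remote_domid,
 *      };
 *      int port = ioctl(fd, IOCTL_EVTCHN_BIND_UNBOUND_PORT, &bind);
 *
 *      unsigned int pending;
 *      read(fd, &pending, sizeof(pending));    (blocks until an event fires)
 *      write(fd, &pending, sizeof(pending));   (re-enables the port)
 *
 * read() returns pending port numbers taken from the notification ring below;
 * writing a port number back re-enables interrupt delivery for that port.
 */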
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/major.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/poll.h>
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/evtchn.h>
#include <xen/xen-ops.h>
#include <asm/xen/hypervisor.h>
struct per_user_data {
        struct mutex bind_mutex; /* serialize bind/unbind operations */
        struct rb_root evtchns;
        unsigned int nr_evtchns;

        /* Notification ring, accessed via /dev/xen/evtchn. */
        unsigned int ring_size;
        evtchn_port_t *ring;
        unsigned int ring_cons, ring_prod, ring_overflow;
        struct mutex ring_cons_mutex; /* protect against concurrent readers */
        spinlock_t ring_prod_lock; /* protect against concurrent interrupts */

        /* Processes wait on this queue when ring is empty. */
        wait_queue_head_t evtchn_wait;
        struct fasync_struct *evtchn_async_queue;
        const char *name;

        domid_t restrict_domid;
};

#define UNRESTRICTED_DOMID ((domid_t)-1)
struct user_evtchn {
        struct rb_node node;
        struct per_user_data *user;
        evtchn_port_t port;
        bool enabled;
};
static void evtchn_free_ring(evtchn_port_t *ring)
{
        kvfree(ring);
}
static unsigned int evtchn_ring_offset(struct per_user_data *u,
                                       unsigned int idx)
{
        return idx & (u->ring_size - 1);
}

static evtchn_port_t *evtchn_ring_entry(struct per_user_data *u,
                                        unsigned int idx)
{
        return u->ring + evtchn_ring_offset(u, idx);
}
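/*
 * Worked example for the helpers above (illustrative, not from the original
 * source): ring_prod and ring_cons are free-running counters, so with
 * ring_size = 8, ring_cons = 10 and ring_prod = 13 there are 13 - 10 = 3
 * unread ports.  evtchn_ring_offset() masks the counters onto slots
 * 10 & 7 = 2, 11 & 7 = 3 and 12 & 7 = 4, and the next port produced goes to
 * slot 13 & 7 = 5.  This only works because ring_size is always a power of
 * two.
 */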
static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
{
        struct rb_node **new = &(u->evtchns.rb_node), *parent = NULL;

        u->nr_evtchns++;

        while (*new) {
                struct user_evtchn *this;

                this = rb_entry(*new, struct user_evtchn, node);

                parent = *new;
                if (this->port < evtchn->port)
                        new = &((*new)->rb_left);
                else if (this->port > evtchn->port)
                        new = &((*new)->rb_right);
                else
                        return -EEXIST;
        }

        /* Add new node and rebalance tree. */
        rb_link_node(&evtchn->node, parent, new);
        rb_insert_color(&evtchn->node, &u->evtchns);

        return 0;
}
static void del_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
{
        u->nr_evtchns--;
        rb_erase(&evtchn->node, &u->evtchns);
        kfree(evtchn);
}
static struct user_evtchn *find_evtchn(struct per_user_data *u,
                                       evtchn_port_t port)
{
        struct rb_node *node = u->evtchns.rb_node;

        while (node) {
                struct user_evtchn *evtchn;

                evtchn = rb_entry(node, struct user_evtchn, node);

                if (evtchn->port < port)
                        node = node->rb_left;
                else if (evtchn->port > port)
                        node = node->rb_right;
                else
                        return evtchn;
        }

        return NULL;
}
static irqreturn_t evtchn_interrupt(int irq, void *data)
{
        struct user_evtchn *evtchn = data;
        struct per_user_data *u = evtchn->user;

        WARN(!evtchn->enabled,
             "Interrupt for port %u, but apparently not enabled; per-user %p\n",
             evtchn->port, u);

        disable_irq_nosync(irq);
        evtchn->enabled = false;

        spin_lock(&u->ring_prod_lock);

        if ((u->ring_prod - u->ring_cons) < u->ring_size) {
                *evtchn_ring_entry(u, u->ring_prod) = evtchn->port;
                wmb(); /* Ensure ring contents visible */
                if (u->ring_cons == u->ring_prod++) {
                        /* Ring was empty: wake up any blocked reader. */
                        wake_up_interruptible(&u->evtchn_wait);
                        kill_fasync(&u->evtchn_async_queue,
                                    SIGIO, POLL_IN);
                }
        } else
                u->ring_overflow = 1;

        spin_unlock(&u->ring_prod_lock);

        return IRQ_HANDLED;
}
static ssize_t evtchn_read(struct file *file, char __user *buf,
                           size_t count, loff_t *ppos)
{
        int rc;
        unsigned int c, p, bytes1 = 0, bytes2 = 0;
        struct per_user_data *u = file->private_data;

        /* Whole number of ports. */
        count &= ~(sizeof(evtchn_port_t)-1);

        if (count == 0)
                return 0;

        if (count > PAGE_SIZE)
                count = PAGE_SIZE;

        for (;;) {
                mutex_lock(&u->ring_cons_mutex);

                rc = -EFBIG;
                if (u->ring_overflow)
                        goto unlock_out;

                c = u->ring_cons;
                p = u->ring_prod;
                if (c != p)
                        break;

                mutex_unlock(&u->ring_cons_mutex);

                if (file->f_flags & O_NONBLOCK)
                        return -EAGAIN;

                rc = wait_event_interruptible(u->evtchn_wait,
                                              u->ring_cons != u->ring_prod);
                if (rc)
                        return rc;
        }

        /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
        if (((c ^ p) & u->ring_size) != 0) {
                bytes1 = (u->ring_size - evtchn_ring_offset(u, c)) *
                        sizeof(evtchn_port_t);
                bytes2 = evtchn_ring_offset(u, p) * sizeof(evtchn_port_t);
        } else {
                bytes1 = (p - c) * sizeof(evtchn_port_t);
                bytes2 = 0;
        }

        /* Truncate chunks according to caller's maximum byte count. */
        if (bytes1 > count) {
                bytes1 = count;
                bytes2 = 0;
        } else if ((bytes1 + bytes2) > count) {
                bytes2 = count - bytes1;
        }

        rc = -EFAULT;
        rmb(); /* Ensure that we see the port before we copy it. */
        if (copy_to_user(buf, evtchn_ring_entry(u, c), bytes1) ||
            ((bytes2 != 0) &&
             copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
                goto unlock_out;

        u->ring_cons += (bytes1 + bytes2) / sizeof(evtchn_port_t);
        rc = bytes1 + bytes2;

 unlock_out:
        mutex_unlock(&u->ring_cons_mutex);
        return rc;
}
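/*
 * Illustrative example of the two-chunk split in evtchn_read() (not from the
 * original source): with ring_size = 8, ring_cons = 6 and ring_prod = 10 the
 * two counters lie in different halves of the counter space
 * ((6 ^ 10) & 8 != 0), so the four pending ports are copied out as two
 * chunks: slots 6 and 7 at the end of the ring (bytes1), followed by slots
 * 0 and 1 at the start of the ring (bytes2).
 */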
static ssize_t evtchn_write(struct file *file, const char __user *buf,
                            size_t count, loff_t *ppos)
{
        int rc, i;
        evtchn_port_t *kbuf = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
        struct per_user_data *u = file->private_data;

        if (kbuf == NULL)
                return -ENOMEM;

        /* Whole number of ports. */
        count &= ~(sizeof(evtchn_port_t)-1);

        rc = 0;
        if (count == 0)
                goto out;

        if (count > PAGE_SIZE)
                count = PAGE_SIZE;

        rc = -EFAULT;
        if (copy_from_user(kbuf, buf, count) != 0)
                goto out;

        mutex_lock(&u->bind_mutex);

        for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) {
                evtchn_port_t port = kbuf[i];
                struct user_evtchn *evtchn;

                evtchn = find_evtchn(u, port);
                if (evtchn && !evtchn->enabled) {
                        evtchn->enabled = true;
                        enable_irq(irq_from_evtchn(port));
                }
        }

        mutex_unlock(&u->bind_mutex);

        rc = count;

 out:
        free_page((unsigned long)kbuf);
        return rc;
}
static int evtchn_resize_ring(struct per_user_data *u)
{
        unsigned int new_size;
        evtchn_port_t *new_ring, *old_ring;

        /*
         * Ensure the ring is large enough to capture all possible
         * events. i.e., one free slot for each bound event.
         */
        if (u->nr_evtchns <= u->ring_size)
                return 0;

        if (u->ring_size == 0)
                new_size = 64;
        else
                new_size = 2 * u->ring_size;

        new_ring = kvmalloc_array(new_size, sizeof(*new_ring), GFP_KERNEL);
        if (!new_ring)
                return -ENOMEM;

        old_ring = u->ring;

        /*
         * Access to the ring contents is serialized by either the
         * prod /or/ cons lock so take both when resizing.
         */
        mutex_lock(&u->ring_cons_mutex);
        spin_lock_irq(&u->ring_prod_lock);

        /*
         * Copy the old ring contents to the new ring.
         *
         * To take care of wrapping, a full ring, and the new index
         * pointing into the second half, simply copy the old contents
         * twice.
         *
         * +---------+    +------------------+
         * |34567  12| -> |34567  1234567  12|
         * +-----p-c-+    +-------c------p---+
         */
        memcpy(new_ring, old_ring, u->ring_size * sizeof(*u->ring));
        memcpy(new_ring + u->ring_size, old_ring,
               u->ring_size * sizeof(*u->ring));

        u->ring = new_ring;
        u->ring_size = new_size;

        spin_unlock_irq(&u->ring_prod_lock);
        mutex_unlock(&u->ring_cons_mutex);

        evtchn_free_ring(old_ring);

        return 0;
}
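/*
 * Illustrative example of the double copy above (not from the original
 * source): growing from ring_size 4 to 8 with ring_cons = 3 and
 * ring_prod = 7 (a full ring).  After the resize the consumer keeps reading
 * at offsets 3 & 7 = 3, 4 & 7 = 4, 5 & 7 = 5 and 6 & 7 = 6, which land on
 * the last slot of the first copy and then run into the second copy, so the
 * indices need no adjustment when the ring grows.
 */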
static int evtchn_bind_to_user(struct per_user_data *u, evtchn_port_t port)
{
        struct user_evtchn *evtchn;
        struct evtchn_close close;
        int rc = 0;

        /*
         * Ports are never reused, so every caller should pass in a
         * unique port.
         *
         * (Locking not necessary because we haven't registered the
         * interrupt handler yet, and our caller has already
         * serialized bind operations.)
         */

        evtchn = kzalloc(sizeof(*evtchn), GFP_KERNEL);
        if (!evtchn)
                return -ENOMEM;

        evtchn->user = u;
        evtchn->port = port;
        evtchn->enabled = true; /* start enabled */

        rc = add_evtchn(u, evtchn);
        if (rc < 0)
                goto err;

        rc = evtchn_resize_ring(u);
        if (rc < 0)
                goto err;

        rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, 0,
                                       u->name, evtchn);
        if (rc < 0)
                goto err;

        rc = evtchn_make_refcounted(port);
        return rc;

err:
        /* bind failed, should close the port now */
        close.port = port;
        if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
                BUG();
        del_evtchn(u, evtchn);
        return rc;
}
static void evtchn_unbind_from_user(struct per_user_data *u,
                                    struct user_evtchn *evtchn)
{
        int irq = irq_from_evtchn(evtchn->port);

        BUG_ON(irq < 0);

        unbind_from_irqhandler(irq, evtchn);

        del_evtchn(u, evtchn);
}
static DEFINE_PER_CPU(int, bind_last_selected_cpu);

static void evtchn_bind_interdom_next_vcpu(evtchn_port_t evtchn)
{
        unsigned int selected_cpu, irq;
        struct irq_desc *desc;
        unsigned long flags;

        irq = irq_from_evtchn(evtchn);
        desc = irq_to_desc(irq);

        if (!desc)
                return;

        raw_spin_lock_irqsave(&desc->lock, flags);
        selected_cpu = this_cpu_read(bind_last_selected_cpu);
        selected_cpu = cpumask_next_and(selected_cpu,
                        desc->irq_common_data.affinity, cpu_online_mask);

        if (unlikely(selected_cpu >= nr_cpu_ids))
                selected_cpu = cpumask_first_and(desc->irq_common_data.affinity,
                                cpu_online_mask);

        this_cpu_write(bind_last_selected_cpu, selected_cpu);

        /* unmask expects irqs to be disabled */
        xen_set_affinity_evtchn(desc, selected_cpu);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}
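/*
 * Illustrative example of the round-robin above (not from the original
 * source): with CPUs 0-3 online and all of them in the IRQ's affinity mask,
 * and bind_last_selected_cpu currently 1, successive interdomain binds are
 * steered to CPU 2, then CPU 3, then wrap back to CPU 0 via
 * cpumask_first_and().
 */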
static long evtchn_ioctl(struct file *file,
                         unsigned int cmd, unsigned long arg)
{
        int rc;
        struct per_user_data *u = file->private_data;
        void __user *uarg = (void __user *) arg;

        /* Prevent bind from racing with unbind */
        mutex_lock(&u->bind_mutex);

        switch (cmd) {
        case IOCTL_EVTCHN_BIND_VIRQ: {
                struct ioctl_evtchn_bind_virq bind;
                struct evtchn_bind_virq bind_virq;

                rc = -EACCES;
                if (u->restrict_domid != UNRESTRICTED_DOMID)
                        break;

                rc = -EFAULT;
                if (copy_from_user(&bind, uarg, sizeof(bind)))
                        break;

                bind_virq.virq = bind.virq;
                bind_virq.vcpu = xen_vcpu_nr(0);
                rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                 &bind_virq);
                if (rc != 0)
                        break;

                rc = evtchn_bind_to_user(u, bind_virq.port);
                if (rc == 0)
                        rc = bind_virq.port;
                break;
        }
        case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
                struct ioctl_evtchn_bind_interdomain bind;
                struct evtchn_bind_interdomain bind_interdomain;

                rc = -EFAULT;
                if (copy_from_user(&bind, uarg, sizeof(bind)))
                        break;

                rc = -EACCES;
                if (u->restrict_domid != UNRESTRICTED_DOMID &&
                    u->restrict_domid != bind.remote_domain)
                        break;

                bind_interdomain.remote_dom  = bind.remote_domain;
                bind_interdomain.remote_port = bind.remote_port;
                rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
                                                 &bind_interdomain);
                if (rc != 0)
                        break;

                rc = evtchn_bind_to_user(u, bind_interdomain.local_port);
                if (rc == 0) {
                        rc = bind_interdomain.local_port;
                        evtchn_bind_interdom_next_vcpu(rc);
                }
                break;
        }
        case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
                struct ioctl_evtchn_bind_unbound_port bind;
                struct evtchn_alloc_unbound alloc_unbound;

                rc = -EACCES;
                if (u->restrict_domid != UNRESTRICTED_DOMID)
                        break;

                rc = -EFAULT;
                if (copy_from_user(&bind, uarg, sizeof(bind)))
                        break;

                alloc_unbound.dom        = DOMID_SELF;
                alloc_unbound.remote_dom = bind.remote_domain;
                rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
                                                 &alloc_unbound);
                if (rc != 0)
                        break;

                rc = evtchn_bind_to_user(u, alloc_unbound.port);
                if (rc == 0)
                        rc = alloc_unbound.port;
                break;
        }
        case IOCTL_EVTCHN_UNBIND: {
                struct ioctl_evtchn_unbind unbind;
                struct user_evtchn *evtchn;

                rc = -EFAULT;
                if (copy_from_user(&unbind, uarg, sizeof(unbind)))
                        break;

                rc = -EINVAL;
                if (unbind.port >= xen_evtchn_nr_channels())
                        break;

                rc = -ENOTCONN;
                evtchn = find_evtchn(u, unbind.port);
                if (!evtchn)
                        break;

                disable_irq(irq_from_evtchn(unbind.port));
                evtchn_unbind_from_user(u, evtchn);
                rc = 0;
                break;
        }
        case IOCTL_EVTCHN_NOTIFY: {
                struct ioctl_evtchn_notify notify;
                struct user_evtchn *evtchn;

                rc = -EFAULT;
                if (copy_from_user(&notify, uarg, sizeof(notify)))
                        break;

                rc = -ENOTCONN;
                evtchn = find_evtchn(u, notify.port);
                if (evtchn) {
                        notify_remote_via_evtchn(notify.port);
                        rc = 0;
                }
                break;
        }
        case IOCTL_EVTCHN_RESET: {
                /* Initialise the ring to empty. Clear errors. */
                mutex_lock(&u->ring_cons_mutex);
                spin_lock_irq(&u->ring_prod_lock);
                u->ring_cons = u->ring_prod = u->ring_overflow = 0;
                spin_unlock_irq(&u->ring_prod_lock);
                mutex_unlock(&u->ring_cons_mutex);
                rc = 0;
                break;
        }
        case IOCTL_EVTCHN_RESTRICT_DOMID: {
                struct ioctl_evtchn_restrict_domid ierd;

                rc = -EACCES;
                if (u->restrict_domid != UNRESTRICTED_DOMID)
                        break;

                rc = -EFAULT;
                if (copy_from_user(&ierd, uarg, sizeof(ierd)))
                        break;

                rc = -EINVAL;
                if (ierd.domid == 0 || ierd.domid >= DOMID_FIRST_RESERVED)
                        break;

                u->restrict_domid = ierd.domid;
                rc = 0;

                break;
        }

        default:
                rc = -ENOSYS;
                break;
        }
        mutex_unlock(&u->bind_mutex);

        return rc;
}
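/*
 * Illustrative userspace sequence for the ioctls above (not code from this
 * driver; it assumes the UAPI structures from xen/evtchn.h and an
 * already-bound local port "port"):
 *
 *      struct ioctl_evtchn_notify notify = { .port = port };
 *      ioctl(fd, IOCTL_EVTCHN_NOTIFY, &notify);        (kick the remote end)
 *
 *      struct ioctl_evtchn_unbind unbind = { .port = port };
 *      ioctl(fd, IOCTL_EVTCHN_UNBIND, &unbind);        (tear the binding down)
 *
 * The bind ioctls return the local port number on success; that value is
 * what later appears in read() and what write() expects back.
 */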
static __poll_t evtchn_poll(struct file *file, poll_table *wait)
{
        __poll_t mask = EPOLLOUT | EPOLLWRNORM;
        struct per_user_data *u = file->private_data;

        poll_wait(file, &u->evtchn_wait, wait);
        if (u->ring_cons != u->ring_prod)
                mask |= EPOLLIN | EPOLLRDNORM;
        if (u->ring_overflow)
                mask = EPOLLERR;
        return mask;
}
static int evtchn_fasync(int fd, struct file *filp, int on)
{
        struct per_user_data *u = filp->private_data;
        return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
}
static int evtchn_open(struct inode *inode, struct file *filp)
{
        struct per_user_data *u;

        u = kzalloc(sizeof(*u), GFP_KERNEL);
        if (u == NULL)
                return -ENOMEM;

        u->name = kasprintf(GFP_KERNEL, "evtchn:%s", current->comm);
        if (u->name == NULL) {
                kfree(u);
                return -ENOMEM;
        }

        init_waitqueue_head(&u->evtchn_wait);

        mutex_init(&u->bind_mutex);
        mutex_init(&u->ring_cons_mutex);
        spin_lock_init(&u->ring_prod_lock);

        u->restrict_domid = UNRESTRICTED_DOMID;

        filp->private_data = u;

        return stream_open(inode, filp);
}
static int evtchn_release(struct inode *inode, struct file *filp)
{
        struct per_user_data *u = filp->private_data;
        struct rb_node *node;

        while ((node = u->evtchns.rb_node)) {
                struct user_evtchn *evtchn;

                evtchn = rb_entry(node, struct user_evtchn, node);
                disable_irq(irq_from_evtchn(evtchn->port));
                evtchn_unbind_from_user(u, evtchn);
        }

        evtchn_free_ring(u->ring);
        kfree(u->name);
        kfree(u);

        return 0;
}
static const struct file_operations evtchn_fops = {
        .owner   = THIS_MODULE,
        .read    = evtchn_read,
        .write   = evtchn_write,
        .unlocked_ioctl = evtchn_ioctl,
        .poll    = evtchn_poll,
        .fasync  = evtchn_fasync,
        .open    = evtchn_open,
        .release = evtchn_release,
        .llseek  = no_llseek,
};
static struct miscdevice evtchn_miscdev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name  = "xen/evtchn",
        .fops  = &evtchn_fops,
};
static int __init evtchn_init(void)
{
        int err;

        if (!xen_domain())
                return -ENODEV;

        /* Create '/dev/xen/evtchn'. */
        err = misc_register(&evtchn_miscdev);
        if (err != 0) {
                pr_err("Could not register /dev/xen/evtchn\n");
                return err;
        }

        pr_info("Event-channel device installed\n");

        return 0;
}
static void __exit evtchn_cleanup(void)
{
        misc_deregister(&evtchn_miscdev);
}
module_init(evtchn_init);
module_exit(evtchn_cleanup);

MODULE_LICENSE("GPL");