// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/errno.h>

#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/moduleparam.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/dm_op.h>
#include <xen/features.h>

#include <xen/xen-ops.h>
#include <xen/balloon.h>

MODULE_LICENSE("GPL");
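/*
 * Sentinel stored in vma->vm_private_data to mark a VMA as claimed by a
 * privcmd mapping before (or without) a real page array being attached.
 */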
#define PRIV_VMA_LOCKED ((void *)1)

static unsigned int privcmd_dm_op_max_num = 16;
module_param_named(dm_op_max_nr_bufs, privcmd_dm_op_max_num, uint, 0644);
MODULE_PARM_DESC(dm_op_max_nr_bufs,
                 "Maximum number of buffers per dm_op hypercall");
static unsigned int privcmd_dm_op_buf_max_size = 4096;
module_param_named(dm_op_buf_max_size, privcmd_dm_op_buf_max_size, uint,
                   0644);
MODULE_PARM_DESC(dm_op_buf_max_size,
                 "Maximum size of a dm_op hypercall buffer");
static int privcmd_vma_range_is_mapped(
               struct vm_area_struct *vma,
               unsigned long addr,
               unsigned long nr_pages);
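/*
 * IOCTL_PRIVCMD_HYPERCALL: forward a raw hypercall from userspace to the
 * hypervisor.  Refused outright when the file handle has been restricted
 * to a specific domain.
 */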
static long privcmd_ioctl_hypercall(struct file *file, void __user *udata)
        struct privcmd_data *data = file->private_data;
        struct privcmd_hypercall hypercall;

        /* Disallow arbitrary hypercalls if restricted */
        if (data->domid != DOMID_INVALID)
                return -EPERM;

        if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
                return -EFAULT;

        xen_preemptible_hcall_begin();
        ret = privcmd_call(hypercall.op,
                           hypercall.arg[0], hypercall.arg[1],
                           hypercall.arg[2], hypercall.arg[3],
                           hypercall.arg[4]);
        xen_preemptible_hcall_end();
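/*
 * Free every page previously gathered onto @pages and reset the list head.
 */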
static void free_page_list(struct list_head *pages)
        list_for_each_entry_safe(p, n, pages, lru)
                __free_page(p);

        INIT_LIST_HEAD(pages);

/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
                        unsigned nelem, size_t size,
                        const void __user *data)
        if (size > PAGE_SIZE)
                return 0;

        pagedata = NULL;        /* quiet, gcc */

        if (pageidx > PAGE_SIZE-size) {
                struct page *page = alloc_page(GFP_KERNEL);

                pagedata = page_address(page);

                list_add_tail(&page->lru, pagelist);

        if (copy_from_user(pagedata + pageidx, data, size))
/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
                          struct list_head *pos,
                          int (*fn)(void *data, void *state),
                          void *state)
        BUG_ON(size > PAGE_SIZE);

        pagedata = NULL;        /* hush, gcc */

        if (pageidx > PAGE_SIZE-size) {
                page = list_entry(pos, struct page, lru);
                pagedata = page_address(page);

        ret = (*fn)(pagedata + pageidx, state);
/*
 * Similar to traverse_pages, but uses each page as a "block" of
 * data to be processed as one unit.
 */
static int traverse_pages_block(unsigned nelem, size_t size,
                                struct list_head *pos,
                                int (*fn)(void *data, int nr, void *state),
                                void *state)
        BUG_ON(size > PAGE_SIZE);

        int nr = (PAGE_SIZE/size);

        page = list_entry(pos, struct page, lru);
        pagedata = page_address(page);
        ret = (*fn)(pagedata, nr, state);
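/*
 * State shared between the IOCTL_PRIVCMD_MMAP handler and the per-chunk
 * mmap_gfn_range() callback: the next expected virtual address, the target
 * VMA and the domain whose frames are being mapped.
 */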
struct mmap_gfn_state {
        unsigned long va;
        struct vm_area_struct *vma;
        domid_t domain;
};

static int mmap_gfn_range(void *data, void *state)
        struct privcmd_mmap_entry *msg = data;
        struct mmap_gfn_state *st = state;
        struct vm_area_struct *vma = st->vma;

        /* Do not allow range to wrap the address space. */
        if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
            ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
                return -EINVAL;

        /* Range chunks must be contiguous in va space. */
        if ((msg->va != st->va) ||
            ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
                return -EINVAL;

        rc = xen_remap_domain_gfn_range(vma,
                                        msg->mfn, msg->npages,

        st->va += msg->npages << PAGE_SHIFT;
static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
        struct privcmd_data *data = file->private_data;
        struct privcmd_mmap mmapcmd;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct mmap_gfn_state state;

        /*
         * Auto-translated guests only support privcmd_ioctl_mmap_batch;
         * this legacy interface is not available to them.
         */
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return -ENOSYS;

        if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
                return -EFAULT;

        /* If restriction is in place, check the domid matches */
        if (data->domid != DOMID_INVALID && data->domid != mmapcmd.dom)
                return -EPERM;

        rc = gather_array(&pagelist,
                          mmapcmd.num, sizeof(struct privcmd_mmap_entry),
                          mmapcmd.entry);
        if (rc || list_empty(&pagelist))

        struct page *page = list_first_entry(&pagelist,
                                             struct page, lru);
        struct privcmd_mmap_entry *msg = page_address(page);

        vma = vma_lookup(mm, msg->va);

        if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)

        vma->vm_private_data = PRIV_VMA_LOCKED;

        state.va = vma->vm_start;
        state.domain = mmapcmd.dom;

        rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
                            &pagelist,
                            mmap_gfn_range, &state);

        mmap_write_unlock(mm);

        free_page_list(&pagelist);
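/*
 * State carried across the two passes of IOCTL_PRIVCMD_MMAPBATCH{,_V2}:
 * the first pass maps the frames, the second writes any per-frame errors
 * back to userspace.
 */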
struct mmap_batch_state {
        domid_t domain;
        unsigned long va;
        struct vm_area_struct *vma;
        int index;

        /* A tristate:
         *      0 for no errors
         *      1 if at least one error has happened (and no
         *          -ENOENT errors have happened)
         *      -ENOENT if at least 1 -ENOENT has happened.
         */
        int global_error;
        int version;

        /* User-space gfn array to store errors in the second pass for V1. */
        xen_pfn_t __user *user_gfn;
        /* User-space int array to store errors in the second pass for V2. */
        int __user *user_err;
};

/*
 * Auto-translated dom0 note: if the domU being created is PV, then the gfn
 * is an mfn (address on the bus).  If it is auto-translated, then the gfn
 * is a pfn (the input to HAP).
 */
static int mmap_batch_fn(void *data, int nr, void *state)
        xen_pfn_t *gfnp = data;
        struct mmap_batch_state *st = state;
        struct vm_area_struct *vma = st->vma;
        struct page **pages = vma->vm_private_data;
        struct page **cur_pages = NULL;

        if (xen_feature(XENFEAT_auto_translated_physmap))
                cur_pages = &pages[st->index];

        ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
                                         (int *)gfnp, st->vma->vm_page_prot,
                                         st->domain, cur_pages);

        /* Adjust the global_error? */
        if (ret != nr) {
                if (ret == -ENOENT)
                        st->global_error = -ENOENT;
                else {
                        /* Record that at least one error has happened. */
                        if (st->global_error == 0)
                                st->global_error = 1;
                }
        }

        st->va += XEN_PAGE_SIZE * nr;
        st->index += nr / XEN_PFN_PER_PAGE;
static int mmap_return_error(int err, struct mmap_batch_state *st)
        if (st->version == 1) {
                ret = get_user(gfn, st->user_gfn);
                /*
                 * V1 encodes the error codes in the top nibble of the
                 * 32-bit gfn (with its known limitations vis-a-vis
                 * 64-bit callers).
                 */
                gfn |= (err == -ENOENT) ?
                        PRIVCMD_MMAPBATCH_PAGED_ERROR :
                        PRIVCMD_MMAPBATCH_MFN_ERROR;
                return __put_user(gfn, st->user_gfn++);

        } else { /* st->version == 2 */
                return __put_user(err, st->user_err++);
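/*
 * Second-pass callback: walk one page worth of gathered frames and write
 * the corresponding error codes back to userspace via mmap_return_error().
 */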
static int mmap_return_errors(void *data, int nr, void *state)
        struct mmap_batch_state *st = state;

        for (i = 0; i < nr; i++) {
                ret = mmap_return_error(errs[i], st);

/*
 * Allocate pfns that are then mapped with gfns from the foreign domid.
 * Update the vma with the page info to use later.
 * Returns: 0 on success, otherwise -errno
 */
static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
        pages = kvcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);

        rc = xen_alloc_unpopulated_pages(numpgs, pages);
        if (rc != 0) {
                pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
                        numpgs, rc);
                kvfree(pages);
                return -ENOMEM;
        }

        BUG_ON(vma->vm_private_data != NULL);
        vma->vm_private_data = pages;

static const struct vm_operations_struct privcmd_vm_ops;
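/*
 * IOCTL_PRIVCMD_MMAPBATCH{,_V2}: map a batch of foreign frames at m.addr.
 * V1 and V2 share this handler; they differ only in how per-frame errors
 * are reported back to userspace.
 */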
static long privcmd_ioctl_mmap_batch(
        struct file *file, void __user *udata, int version)
        struct privcmd_data *data = file->private_data;
        struct privcmd_mmapbatch_v2 m;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long nr_pages;
        struct mmap_batch_state state;

        switch (version) {
        case 1:
                if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
                        return -EFAULT;
                /* Returns per-frame error in m.arr. */
                m.err = NULL;
                if (!access_ok(m.arr, m.num * sizeof(*m.arr)))
                        return -EFAULT;
                break;
        case 2:
                if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
                        return -EFAULT;
                /* Returns per-frame error code in m.err. */
                if (!access_ok(m.err, m.num * (sizeof(*m.err))))
                        return -EFAULT;
                break;
        default:
                return -EINVAL;
        }

        /* If restriction is in place, check the domid matches */
        if (data->domid != DOMID_INVALID && data->domid != m.dom)
                return -EPERM;

        nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
        if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
                return -EINVAL;

        ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);

        if (list_empty(&pagelist)) {

        /* Zero error array now to only copy back actual errors. */
        if (clear_user(m.err, sizeof(int) * m.num)) {

        vma = find_vma(mm, m.addr);
        if (!vma ||
            vma->vm_ops != &privcmd_vm_ops) {
        /*
         * Caller must either:
         *
         * Map the whole VMA range, which will also allocate all the
         * pages required for the auto_translated_physmap case.
         *
         * Or
         *
         * Map unmapped holes left from a previous map attempt (e.g.,
         * because those foreign frames were previously paged out).
         */
        if (vma->vm_private_data == NULL) {
                if (m.addr != vma->vm_start ||
                    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {

                if (xen_feature(XENFEAT_auto_translated_physmap)) {
                        ret = alloc_empty_pages(vma, nr_pages);

                        vma->vm_private_data = PRIV_VMA_LOCKED;

                if (m.addr < vma->vm_start ||
                    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {

                if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {

        state.domain = m.dom;
        state.global_error = 0;
        state.version = version;

        BUILD_BUG_ON(((PAGE_SIZE / sizeof(xen_pfn_t)) % XEN_PFN_PER_PAGE) != 0);
        /* mmap_batch_fn guarantees ret == 0 */
        BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
                                    &pagelist, mmap_batch_fn, &state));
        mmap_write_unlock(mm);

        if (state.global_error) {
                /* Write back errors in second pass. */
                state.user_gfn = (xen_pfn_t *)m.arr;
                state.user_err = m.err;
                ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
                                           &pagelist, mmap_return_errors, &state);
        }

        /* If we have not had any EFAULT-like global errors then set the global
         * error to -ENOENT if necessary. */
        if ((ret == 0) && (state.global_error == -ENOENT))
                ret = -ENOENT;

        free_page_list(&pagelist);

        mmap_write_unlock(mm);
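/*
 * Pin the userspace pages backing each dm_op buffer so the hypervisor can
 * safely access them for the duration of the hypercall.  On return *pinned
 * holds the number of pages actually pinned, so the caller can undo the
 * pinning even after a partial failure.
 */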
static int lock_pages(
        struct privcmd_dm_op_buf kbufs[], unsigned int num,
        struct page *pages[], unsigned int nr_pages, unsigned int *pinned)
        unsigned int i, off = 0;

        for (i = 0; i < num; ) {
                unsigned int requested;

                requested = DIV_ROUND_UP(
                        offset_in_page(kbufs[i].uptr) + kbufs[i].size,
                        PAGE_SIZE);
                if (requested > nr_pages)
                        return -ENOSPC;

                page_count = pin_user_pages_fast(
                        (unsigned long)kbufs[i].uptr + off * PAGE_SIZE,
                        requested, FOLL_WRITE, pages);
                if (page_count <= 0)
                        return page_count ? : -EFAULT;

                *pinned += page_count;
                nr_pages -= page_count;

                off = (requested == page_count) ? 0 : off + page_count;

static void unlock_pages(struct page *pages[], unsigned int nr_pages)
        unpin_user_pages_dirty_lock(pages, nr_pages, true);
static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
        struct privcmd_data *data = file->private_data;
        struct privcmd_dm_op kdata;
        struct privcmd_dm_op_buf *kbufs;
        unsigned int nr_pages = 0;
        struct page **pages = NULL;
        struct xen_dm_op_buf *xbufs = NULL;
        unsigned int pinned = 0;

        if (copy_from_user(&kdata, udata, sizeof(kdata)))
                return -EFAULT;

        /* If restriction is in place, check the domid matches */
        if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
                return -EPERM;

        if (kdata.num > privcmd_dm_op_max_num)
                return -E2BIG;

        kbufs = kcalloc(kdata.num, sizeof(*kbufs), GFP_KERNEL);

        if (copy_from_user(kbufs, kdata.ubufs,
                           sizeof(*kbufs) * kdata.num)) {

        for (i = 0; i < kdata.num; i++) {
                if (kbufs[i].size > privcmd_dm_op_buf_max_size) {

                if (!access_ok(kbufs[i].uptr,
                               kbufs[i].size)) {

                nr_pages += DIV_ROUND_UP(
                        offset_in_page(kbufs[i].uptr) + kbufs[i].size,
                        PAGE_SIZE);

        pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);

        xbufs = kcalloc(kdata.num, sizeof(*xbufs), GFP_KERNEL);

        rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned);

        for (i = 0; i < kdata.num; i++) {
                set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
                xbufs[i].size = kbufs[i].size;
        }

        xen_preemptible_hcall_begin();
        rc = HYPERVISOR_dm_op(kdata.dom, kdata.num, xbufs);
        xen_preemptible_hcall_end();

        unlock_pages(pages, pinned);
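/*
 * IOCTL_PRIVCMD_RESTRICT: restrict this file handle to a single domain.
 * The restriction is one-way; once set it can only be re-asserted with the
 * same domid, never widened or removed.
 */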
static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
        struct privcmd_data *data = file->private_data;

        if (copy_from_user(&dom, udata, sizeof(dom)))
                return -EFAULT;

        /* Set restriction to the specified domain, or check it matches */
        if (data->domid == DOMID_INVALID)
                data->domid = dom;
        else if (data->domid != dom)
                return -EINVAL;
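/*
 * IOCTL_PRIVCMD_MMAP_RESOURCE: map pages of a guest resource, acquired via
 * XENMEM_acquire_resource, into the calling process.  With addr and num
 * both zero, only the size of the resource is queried and returned.
 */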
static long privcmd_ioctl_mmap_resource(struct file *file,
                                struct privcmd_mmap_resource __user *udata)
        struct privcmd_data *data = file->private_data;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct privcmd_mmap_resource kdata;
        xen_pfn_t *pfns = NULL;
        struct xen_mem_acquire_resource xdata = { };

        if (copy_from_user(&kdata, udata, sizeof(kdata)))
                return -EFAULT;

        /* If restriction is in place, check the domid matches */
        if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
                return -EPERM;

        /* Both fields must be set or unset */
        if (!!kdata.addr != !!kdata.num)
                return -EINVAL;

        xdata.domid = kdata.dom;
        xdata.type = kdata.type;

        if (!kdata.addr && !kdata.num) {
                /* Query the size of the resource. */
                rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
                if (rc)
                        return rc;
                return __put_user(xdata.nr_frames, &udata->num);
        }

        vma = find_vma(mm, kdata.addr);
        if (!vma || vma->vm_ops != &privcmd_vm_ops) {

        pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL | __GFP_NOWARN);

        if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
            xen_feature(XENFEAT_auto_translated_physmap)) {
                unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE);

                rc = alloc_empty_pages(vma, nr);

                pages = vma->vm_private_data;
                for (i = 0; i < kdata.num; i++) {
                        xen_pfn_t pfn =
                                page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);

                        pfns[i] = pfn + (i % XEN_PFN_PER_PAGE);
                }
        } else
                vma->vm_private_data = PRIV_VMA_LOCKED;

        xdata.frame = kdata.idx;
        xdata.nr_frames = kdata.num;
        set_xen_guest_handle(xdata.frame_list, pfns);
        xen_preemptible_hcall_begin();
        rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
        xen_preemptible_hcall_end();

        if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
            xen_feature(XENFEAT_auto_translated_physmap)) {
                rc = xen_remap_vma_range(vma, kdata.addr, kdata.num << PAGE_SHIFT);
        } else {
                unsigned int domid =
                        (xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
                        DOMID_SELF : kdata.dom;
                int num, *errs = (int *)pfns;

                BUILD_BUG_ON(sizeof(*errs) > sizeof(*pfns));
                num = xen_remap_domain_mfn_array(vma,
                                                 kdata.addr & PAGE_MASK,
                                                 pfns, kdata.num, errs,
                                                 vma->vm_page_prot,
                                                 domid);
                if (num < 0)
                        rc = num;
                else if (num != kdata.num) {
                        for (i = 0; i < num; i++) {

        mmap_write_unlock(mm);
#ifdef CONFIG_XEN_PRIVCMD_IRQFD

static struct workqueue_struct *irqfd_cleanup_wq;
static DEFINE_MUTEX(irqfds_lock);
static LIST_HEAD(irqfds_list);
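/*
 * One instance per assigned irqfd: the dm_op buffer used to inject the
 * interrupt, the eventfd being watched, and the bookkeeping needed to tear
 * everything down again from a workqueue.
 */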
struct privcmd_kernel_irqfd {
        struct xen_dm_op_buf xbufs;
        domid_t dom;
        bool error;
        struct eventfd_ctx *eventfd;
        struct work_struct shutdown;
        wait_queue_entry_t wait;
        struct list_head list;
        poll_table pt;
};
static void irqfd_deactivate(struct privcmd_kernel_irqfd *kirqfd)
        lockdep_assert_held(&irqfds_lock);

        list_del_init(&kirqfd->list);
        queue_work(irqfd_cleanup_wq, &kirqfd->shutdown);

static void irqfd_shutdown(struct work_struct *work)
        struct privcmd_kernel_irqfd *kirqfd =
                container_of(work, struct privcmd_kernel_irqfd, shutdown);

        eventfd_ctx_remove_wait_queue(kirqfd->eventfd, &kirqfd->wait, &cnt);
        eventfd_ctx_put(kirqfd->eventfd);
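/*
 * Consume the pending eventfd count and fire the stashed dm_op hypercall
 * that actually injects the interrupt into the guest.
 */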
static void irqfd_inject(struct privcmd_kernel_irqfd *kirqfd)
        eventfd_ctx_do_read(kirqfd->eventfd, &cnt);

        xen_preemptible_hcall_begin();
        rc = HYPERVISOR_dm_op(kirqfd->dom, 1, &kirqfd->xbufs);
        xen_preemptible_hcall_end();

        /* Don't repeat the error message for consecutive failures */
        if (rc && !kirqfd->error) {
                pr_err("Failed to configure irq for guest domain: %d\n",
                       kirqfd->dom);
        }

static int
irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key)
        struct privcmd_kernel_irqfd *kirqfd =
                container_of(wait, struct privcmd_kernel_irqfd, wait);
        __poll_t flags = key_to_poll(key);

        if (flags & EPOLLIN)
                irqfd_inject(kirqfd);

        if (flags & EPOLLHUP) {
                mutex_lock(&irqfds_lock);
                irqfd_deactivate(kirqfd);
                mutex_unlock(&irqfds_lock);
        }

static void
irqfd_poll_func(struct file *file, wait_queue_head_t *wqh, poll_table *pt)
        struct privcmd_kernel_irqfd *kirqfd =
                container_of(pt, struct privcmd_kernel_irqfd, pt);

        add_wait_queue_priority(wqh, &kirqfd->wait);
static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
        struct privcmd_kernel_irqfd *kirqfd, *tmp;

        kirqfd = kzalloc(sizeof(*kirqfd) + irqfd->size, GFP_KERNEL);

        if (copy_from_user(dm_op, u64_to_user_ptr(irqfd->dm_op), irqfd->size)) {

        kirqfd->xbufs.size = irqfd->size;
        set_xen_guest_handle(kirqfd->xbufs.h, dm_op);
        kirqfd->dom = irqfd->dom;
        INIT_WORK(&kirqfd->shutdown, irqfd_shutdown);

        f = fdget(irqfd->fd);

        kirqfd->eventfd = eventfd_ctx_fileget(f.file);
        if (IS_ERR(kirqfd->eventfd)) {
                ret = PTR_ERR(kirqfd->eventfd);

        /*
         * Install our own custom wake-up handling so we are notified via a
         * callback whenever someone signals the underlying eventfd.
         */
        init_waitqueue_func_entry(&kirqfd->wait, irqfd_wakeup);
        init_poll_funcptr(&kirqfd->pt, irqfd_poll_func);

        mutex_lock(&irqfds_lock);

        list_for_each_entry(tmp, &irqfds_list, list) {
                if (kirqfd->eventfd == tmp->eventfd) {
                        mutex_unlock(&irqfds_lock);

        list_add_tail(&kirqfd->list, &irqfds_list);
        mutex_unlock(&irqfds_lock);

        /*
         * Check if there was an event already pending on the eventfd before we
         * registered, and trigger it as if we didn't miss it.
         */
        events = vfs_poll(f.file, &kirqfd->pt);
        if (events & EPOLLIN)
                irqfd_inject(kirqfd);

        /*
         * Do not drop the file until the kirqfd is fully initialized, otherwise
         * we might race against the EPOLLHUP.
         */

        eventfd_ctx_put(kirqfd->eventfd);
static int privcmd_irqfd_deassign(struct privcmd_irqfd *irqfd)
        struct privcmd_kernel_irqfd *kirqfd;
        struct eventfd_ctx *eventfd;

        eventfd = eventfd_ctx_fdget(irqfd->fd);
        if (IS_ERR(eventfd))
                return PTR_ERR(eventfd);

        mutex_lock(&irqfds_lock);

        list_for_each_entry(kirqfd, &irqfds_list, list) {
                if (kirqfd->eventfd == eventfd) {
                        irqfd_deactivate(kirqfd);
                        break;
                }
        }

        mutex_unlock(&irqfds_lock);

        eventfd_ctx_put(eventfd);

        /*
         * Block until we know all outstanding shutdown jobs have completed so
         * that we guarantee there will not be any more interrupts once this
         * deassign function returns.
         */
        flush_workqueue(irqfd_cleanup_wq);
static long privcmd_ioctl_irqfd(struct file *file, void __user *udata)
        struct privcmd_data *data = file->private_data;
        struct privcmd_irqfd irqfd;

        if (copy_from_user(&irqfd, udata, sizeof(irqfd)))
                return -EFAULT;

        /* No other flags should be set */
        if (irqfd.flags & ~PRIVCMD_IRQFD_FLAG_DEASSIGN)
                return -EINVAL;

        /* If restriction is in place, check the domid matches */
        if (data->domid != DOMID_INVALID && data->domid != irqfd.dom)
                return -EPERM;

        if (irqfd.flags & PRIVCMD_IRQFD_FLAG_DEASSIGN)
                return privcmd_irqfd_deassign(&irqfd);

        return privcmd_irqfd_assign(&irqfd);
static int privcmd_irqfd_init(void)
        irqfd_cleanup_wq = alloc_workqueue("privcmd-irqfd-cleanup", 0, 0);
        if (!irqfd_cleanup_wq)
                return -ENOMEM;

static void privcmd_irqfd_exit(void)
        struct privcmd_kernel_irqfd *kirqfd, *tmp;

        mutex_lock(&irqfds_lock);

        list_for_each_entry_safe(kirqfd, tmp, &irqfds_list, list)
                irqfd_deactivate(kirqfd);

        mutex_unlock(&irqfds_lock);

        destroy_workqueue(irqfd_cleanup_wq);

#else /* !CONFIG_XEN_PRIVCMD_IRQFD */

static inline long privcmd_ioctl_irqfd(struct file *file, void __user *udata)

static inline int privcmd_irqfd_init(void)

static inline void privcmd_irqfd_exit(void)

#endif /* CONFIG_XEN_PRIVCMD_IRQFD */
static long privcmd_ioctl(struct file *file,
                          unsigned int cmd, unsigned long data)
        void __user *udata = (void __user *) data;

        switch (cmd) {
        case IOCTL_PRIVCMD_HYPERCALL:
                ret = privcmd_ioctl_hypercall(file, udata);
                break;

        case IOCTL_PRIVCMD_MMAP:
                ret = privcmd_ioctl_mmap(file, udata);
                break;

        case IOCTL_PRIVCMD_MMAPBATCH:
                ret = privcmd_ioctl_mmap_batch(file, udata, 1);
                break;

        case IOCTL_PRIVCMD_MMAPBATCH_V2:
                ret = privcmd_ioctl_mmap_batch(file, udata, 2);
                break;

        case IOCTL_PRIVCMD_DM_OP:
                ret = privcmd_ioctl_dm_op(file, udata);
                break;

        case IOCTL_PRIVCMD_RESTRICT:
                ret = privcmd_ioctl_restrict(file, udata);
                break;

        case IOCTL_PRIVCMD_MMAP_RESOURCE:
                ret = privcmd_ioctl_mmap_resource(file, udata);
                break;

        case IOCTL_PRIVCMD_IRQFD:
                ret = privcmd_ioctl_irqfd(file, udata);
                break;
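/*
 * Each open of the device gets its own struct privcmd_data; a freshly
 * opened handle starts out unrestricted (domid == DOMID_INVALID).
 */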
static int privcmd_open(struct inode *ino, struct file *file)
        struct privcmd_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

        /* DOMID_INVALID implies no restriction */
        data->domid = DOMID_INVALID;

        file->private_data = data;

static int privcmd_release(struct inode *ino, struct file *file)
        struct privcmd_data *data = file->private_data;
static void privcmd_close(struct vm_area_struct *vma)
        struct page **pages = vma->vm_private_data;
        int numpgs = vma_pages(vma);
        int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;

        if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
                return;

        rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
        if (rc == 0)
                xen_free_unpopulated_pages(numpgs, pages);
        else
                pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
                        numpgs, rc);

static vm_fault_t privcmd_fault(struct vm_fault *vmf)
        printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
               vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
               vmf->pgoff, (void *)vmf->address);

        return VM_FAULT_SIGBUS;
static const struct vm_operations_struct privcmd_vm_ops = {
        .close = privcmd_close,
        .fault = privcmd_fault
};

static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
        /* DONTCOPY is essential for Xen because copy_page_range doesn't know
         * how to recreate these mappings */
        vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTCOPY |
                          VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_ops = &privcmd_vm_ops;
        vma->vm_private_data = NULL;

/*
 * For MMAPBATCH*.  This allows asserting the singleshot mapping
 * on a per pfn/pte basis.  Mapping calls that fail with ENOENT
 * can then be retried until success.
 */
static int is_mapped_fn(pte_t *pte, unsigned long addr, void *data)
        return pte_none(ptep_get(pte)) ? 0 : -EBUSY;

static int privcmd_vma_range_is_mapped(
               struct vm_area_struct *vma,
               unsigned long addr,
               unsigned long nr_pages)
        return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
                                   is_mapped_fn, NULL) != 0;

const struct file_operations xen_privcmd_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = privcmd_ioctl,
        .open = privcmd_open,
        .release = privcmd_release,
        .mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);
static struct miscdevice privcmd_dev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "xen/privcmd",
        .fops = &xen_privcmd_fops,
};

static int __init privcmd_init(void)
        err = misc_register(&privcmd_dev);

                pr_err("Could not register Xen privcmd device\n");

        err = misc_register(&xen_privcmdbuf_dev);

                pr_err("Could not register Xen hypercall-buf device\n");
                goto err_privcmdbuf;

        err = privcmd_irqfd_init();

                pr_err("irqfd init failed\n");

        misc_deregister(&xen_privcmdbuf_dev);

        misc_deregister(&privcmd_dev);

static void __exit privcmd_exit(void)
        privcmd_irqfd_exit();
        misc_deregister(&privcmd_dev);
        misc_deregister(&xen_privcmdbuf_dev);

module_init(privcmd_init);
module_exit(privcmd_exit);