// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/moduleparam.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/dm_op.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/balloon.h>

#include "privcmd.h"

MODULE_LICENSE("GPL");

#define PRIV_VMA_LOCKED ((void *)1)

static unsigned int privcmd_dm_op_max_num = 16;
module_param_named(dm_op_max_nr_bufs, privcmd_dm_op_max_num, uint, 0644);
MODULE_PARM_DESC(dm_op_max_nr_bufs,
		 "Maximum number of buffers per dm_op hypercall");

static unsigned int privcmd_dm_op_buf_max_size = 4096;
module_param_named(dm_op_buf_max_size, privcmd_dm_op_buf_max_size, uint,
		   0644);
MODULE_PARM_DESC(dm_op_buf_max_size,
		 "Maximum size of a dm_op hypercall buffer");

/* Per-open-file state: DOMID_INVALID means the fd is unrestricted. */
struct privcmd_data {
	domid_t domid;
};

static int privcmd_vma_range_is_mapped(
	       struct vm_area_struct *vma,
	       unsigned long addr,
	       unsigned long nr_pages);
static long privcmd_ioctl_hypercall(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_hypercall hypercall;
	long ret;

	/* Disallow arbitrary hypercalls if restricted */
	if (data->domid != DOMID_INVALID)
		return -EPERM;

	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
		return -EFAULT;

	xen_preemptible_hcall_begin();
	ret = privcmd_call(hypercall.op,
			   hypercall.arg[0], hypercall.arg[1],
			   hypercall.arg[2], hypercall.arg[3],
			   hypercall.arg[4]);
	xen_preemptible_hcall_end();

	return ret;
}
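
/*
 * Illustrative userspace sketch (not part of this driver): a privileged
 * tool reaches the handler above roughly as follows, with the ioctl
 * number and struct layout coming from xen/privcmd.h:
 *
 *	int fd = open("/dev/xen/privcmd", O_RDWR);
 *	struct privcmd_hypercall hc = {
 *		.op  = __HYPERVISOR_xen_version,
 *		.arg = { XENVER_version, 0, 0, 0, 0 },
 *	};
 *	long rc = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &hc);
 *
 * On a restricted fd (see privcmd_ioctl_restrict()) this returns -EPERM.
 */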
static void free_page_list(struct list_head *pages)
{
	struct page *p, *n;

	list_for_each_entry_safe(p, n, pages, lru)
		__free_page(p);

	INIT_LIST_HEAD(pages);
}

/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
			unsigned nelem, size_t size,
			const void __user *data)
{
	unsigned pageidx;
	void *pagedata;
	int ret;

	if (size > PAGE_SIZE)
		return 0;

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* quiet, gcc */
	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page = alloc_page(GFP_KERNEL);

			ret = -ENOMEM;
			if (page == NULL)
				goto fail;

			pagedata = page_address(page);
			list_add_tail(&page->lru, pagelist);
			pageidx = 0;
		}

		ret = -EFAULT;
		if (copy_from_user(pagedata + pageidx, data, size))
			goto fail;

		data += size;
		pageidx += size;
	}

	ret = 0;

fail:
	return ret;
}
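
/*
 * Packing example (illustrative): with 4 KiB pages and
 * sizeof(xen_pfn_t) == 8, gather_array() stages 512 frame numbers per
 * kernel page, so an 8000-frame MMAPBATCH request occupies 16 pages on
 * the temporary list before being walked by the traverse helpers below.
 */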
/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
			  struct list_head *pos,
			  int (*fn)(void *data, void *state),
			  void *state)
{
	void *pagedata;
	unsigned pageidx;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* hush, gcc */

	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page;

			pos = pos->next;
			page = list_entry(pos, struct page, lru);
			pagedata = page_address(page);
			pageidx = 0;
		}

		ret = (*fn)(pagedata + pageidx, state);
		if (ret)
			break;
		pageidx += size;
	}

	return ret;
}

/*
 * Similar to traverse_pages, but use each page as a "block" of
 * data to be processed as one unit.
 */
static int traverse_pages_block(unsigned nelem, size_t size,
				struct list_head *pos,
				int (*fn)(void *data, int nr, void *state),
				void *state)
{
	void *pagedata;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	while (nelem) {
		int nr = (PAGE_SIZE/size);
		struct page *page;

		if (nr > nelem)
			nr = nelem;
		pos = pos->next;
		page = list_entry(pos, struct page, lru);
		pagedata = page_address(page);
		ret = (*fn)(pagedata, nr, state);
		if (ret)
			break;
		nelem -= nr;
	}

	return ret;
}
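
/*
 * Note on the two walkers (commentary, not from the original source):
 * traverse_pages() hands the callback one element at a time, which suits
 * mmap_gfn_range() where each privcmd_mmap_entry is independent, while
 * traverse_pages_block() hands over a whole page of elements so that
 * mmap_batch_fn() can issue one xen_remap_domain_gfn_array() call for up
 * to PAGE_SIZE / sizeof(xen_pfn_t) frames at once.
 */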
struct mmap_gfn_state {
	unsigned long va;
	struct vm_area_struct *vma;
	domid_t domain;
};

static int mmap_gfn_range(void *data, void *state)
{
	struct privcmd_mmap_entry *msg = data;
	struct mmap_gfn_state *st = state;
	struct vm_area_struct *vma = st->vma;
	int rc;

	/* Do not allow range to wrap the address space. */
	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
		return -EINVAL;

	/* Range chunks must be contiguous in va space. */
	if ((msg->va != st->va) ||
	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
		return -EINVAL;

	rc = xen_remap_domain_gfn_range(vma,
					msg->va & PAGE_MASK,
					msg->mfn, msg->npages,
					vma->vm_page_prot,
					st->domain, NULL);
	if (rc < 0)
		return rc;

	st->va += msg->npages << PAGE_SHIFT;

	return 0;
}

static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_mmap mmapcmd;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;
	LIST_HEAD(pagelist);
	struct mmap_gfn_state state;

	/* Auto-translated guests must use privcmd_ioctl_mmap_batch instead. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -ENOSYS;

	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != mmapcmd.dom)
		return -EPERM;

	rc = gather_array(&pagelist,
			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			  mmapcmd.entry);
	if (rc || list_empty(&pagelist))
		goto out;

	mmap_write_lock(mm);

	{
		struct page *page = list_first_entry(&pagelist,
						     struct page, lru);
		struct privcmd_mmap_entry *msg = page_address(page);

		vma = vma_lookup(mm, msg->va);
		rc = -EINVAL;

		if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
			goto out_up;
		vma->vm_private_data = PRIV_VMA_LOCKED;
	}

	state.va = vma->vm_start;
	state.vma = vma;
	state.domain = mmapcmd.dom;

	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			    &pagelist,
			    mmap_gfn_range, &state);

out_up:
	mmap_write_unlock(mm);

out:
	free_page_list(&pagelist);

	return rc;
}
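
/*
 * Commentary (not from the original source): storing PRIV_VMA_LOCKED in
 * vm_private_data marks the VMA as already claimed by a mapping
 * request.  The legacy path above rejects a VMA whose vm_private_data
 * is non-NULL outright, while privcmd_ioctl_mmap_batch() below still
 * accepts such a VMA, but only for retrying ranges that are not yet
 * populated (see privcmd_vma_range_is_mapped()).
 */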
struct mmap_batch_state {
	domid_t domain;
	unsigned long va;
	struct vm_area_struct *vma;
	int index;
	/* A tristate:
	 *      0 for no errors
	 *      1 if at least one error has happened (and no
	 *          -ENOENT errors have happened)
	 *      -ENOENT if at least 1 -ENOENT has happened.
	 */
	int global_error;
	int version;

	/* User-space gfn array to store errors in the second pass for V1. */
	xen_pfn_t __user *user_gfn;
	/* User-space int array to store errors in the second pass for V2. */
	int __user *user_err;
};

/* auto translated dom0 note: if domU being created is PV, then gfn is
 * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
 */
static int mmap_batch_fn(void *data, int nr, void *state)
{
	xen_pfn_t *gfnp = data;
	struct mmap_batch_state *st = state;
	struct vm_area_struct *vma = st->vma;
	struct page **pages = vma->vm_private_data;
	struct page **cur_pages = NULL;
	int ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		cur_pages = &pages[st->index];

	ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
					 (int *)gfnp, st->vma->vm_page_prot,
					 st->domain, cur_pages);

	/* Adjust the global_error? */
	if (ret != nr) {
		if (ret == -ENOENT)
			st->global_error = -ENOENT;
		else {
			/* Record that at least one error has happened. */
			if (st->global_error == 0)
				st->global_error = 1;
		}
	}
	st->va += XEN_PAGE_SIZE * nr;
	st->index += nr / XEN_PFN_PER_PAGE;

	return 0;
}

static int mmap_return_error(int err, struct mmap_batch_state *st)
{
	int ret;

	if (st->version == 1) {
		if (err) {
			xen_pfn_t gfn;

			ret = get_user(gfn, st->user_gfn);
			if (ret < 0)
				return ret;
			/*
			 * V1 encodes the error codes in the 32bit top
			 * nibble of the gfn (with its known
			 * limitations vis-a-vis 64 bit callers).
			 */
			gfn |= (err == -ENOENT) ?
				PRIVCMD_MMAPBATCH_PAGED_ERROR :
				PRIVCMD_MMAPBATCH_MFN_ERROR;
			return __put_user(gfn, st->user_gfn++);
		} else
			st->user_gfn++;
	} else { /* st->version == 2 */
		if (err)
			return __put_user(err, st->user_err++);
		else
			st->user_err++;
	}

	return 0;
}
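
/*
 * Worked example for the V1 encoding above (illustrative): a frame that
 * failed with -ENOENT is handed back to userspace as
 * (gfn | PRIVCMD_MMAPBATCH_PAGED_ERROR) and any other failure as
 * (gfn | PRIVCMD_MMAPBATCH_MFN_ERROR), both squeezed into the top nibble
 * of the 32-bit frame value.  V2 callers instead receive the plain errno
 * in the separate err[] array.
 */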
static int mmap_return_errors(void *data, int nr, void *state)
{
	struct mmap_batch_state *st = state;
	int *errs = data;
	int i;
	int ret;

	for (i = 0; i < nr; i++) {
		ret = mmap_return_error(errs[i], st);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* Allocate pfns that are then mapped with gfns from foreign domid. Update
 * the vma with the page info to use later.
 * Returns: 0 if success, otherwise -errno
 */
static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
{
	int rc;
	struct page **pages;

	pages = kvcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
	if (pages == NULL)
		return -ENOMEM;

	rc = xen_alloc_unpopulated_pages(numpgs, pages);
	if (rc != 0) {
		pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
			numpgs, rc);
		kvfree(pages);
		return -ENOMEM;
	}
	BUG_ON(vma->vm_private_data != NULL);
	vma->vm_private_data = pages;

	return 0;
}
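
/*
 * Commentary (not from the original source): on auto-translated guests
 * xen_alloc_unpopulated_pages() reserves guest-physical frames that are
 * not backed by this domain's own RAM.  The batch and mmap-resource
 * paths below then ask Xen to map the foreign (or hypervisor-owned)
 * frames at those slots, and privcmd_close() later hands them back via
 * xen_free_unpopulated_pages().
 */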
static const struct vm_operations_struct privcmd_vm_ops;

static long privcmd_ioctl_mmap_batch(
	struct file *file, void __user *udata, int version)
{
	struct privcmd_data *data = file->private_data;
	int ret;
	struct privcmd_mmapbatch_v2 m;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long nr_pages;
	LIST_HEAD(pagelist);
	struct mmap_batch_state state;

	switch (version) {
	case 1:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
			return -EFAULT;
		/* Returns per-frame error in m.arr. */
		m.err = NULL;
		if (!access_ok(m.arr, m.num * sizeof(*m.arr)))
			return -EFAULT;
		break;
	case 2:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
			return -EFAULT;
		/* Returns per-frame error code in m.err. */
		if (!access_ok(m.err, m.num * (sizeof(*m.err))))
			return -EFAULT;
		break;
	default:
		return -EINVAL;
	}

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != m.dom)
		return -EPERM;

	nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
		return -EINVAL;

	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);
	if (ret)
		goto out;
	if (list_empty(&pagelist)) {
		ret = -EINVAL;
		goto out;
	}

	if (version == 2) {
		/* Zero error array now to only copy back actual errors. */
		if (clear_user(m.err, sizeof(int) * m.num)) {
			ret = -EFAULT;
			goto out;
		}
	}

	mmap_write_lock(mm);

	vma = find_vma(mm, m.addr);
	if (!vma ||
	    vma->vm_ops != &privcmd_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Caller must either:
	 *
	 * Map the whole VMA range, which will also allocate all the
	 * pages required for the auto_translated_physmap case.
	 *
	 * Or
	 *
	 * Map unmapped holes left from a previous map attempt (e.g.,
	 * because those foreign frames were previously paged out).
	 */
	if (vma->vm_private_data == NULL) {
		if (m.addr != vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = alloc_empty_pages(vma, nr_pages);
			if (ret < 0)
				goto out_unlock;
		} else
			vma->vm_private_data = PRIV_VMA_LOCKED;
	} else {
		if (m.addr < vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	state.domain        = m.dom;
	state.vma           = vma;
	state.va            = m.addr;
	state.index         = 0;
	state.global_error  = 0;
	state.version       = version;

	BUILD_BUG_ON(((PAGE_SIZE / sizeof(xen_pfn_t)) % XEN_PFN_PER_PAGE) != 0);
	/* mmap_batch_fn guarantees ret == 0 */
	BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
				    &pagelist, mmap_batch_fn, &state));

	mmap_write_unlock(mm);

	if (state.global_error) {
		/* Write back errors in second pass. */
		state.user_gfn = (xen_pfn_t *)m.arr;
		state.user_err = m.err;
		ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
					   &pagelist, mmap_return_errors, &state);
	} else
		ret = 0;

	/* If we have not had any EFAULT-like global errors then set the global
	 * error to -ENOENT if necessary. */
	if ((ret == 0) && (state.global_error == -ENOENT))
		ret = -ENOENT;

out:
	free_page_list(&pagelist);
	return ret;

out_unlock:
	mmap_write_unlock(mm);
	goto out;
}
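
/*
 * Illustrative userspace sketch (not part of this driver): a typical
 * consumer (e.g. libxenforeignmemory) maps count frames of domain domid
 * roughly as follows, with the field names taken from xen/privcmd.h:
 *
 *	void *addr = mmap(NULL, count * 4096, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, privcmd_fd, 0);
 *	struct privcmd_mmapbatch_v2 req = {
 *		.num = count, .dom = domid, .addr = (uintptr_t)addr,
 *		.arr = gfn_array, .err = err_array,
 *	};
 *	rc = ioctl(privcmd_fd, IOCTL_PRIVCMD_MMAPBATCH_V2, &req);
 *
 * Frames reported as -ENOENT in err_array (paged-out foreign memory)
 * can be retried with another ioctl on the same VMA, which is the
 * hole-filling case handled above.
 */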
static int lock_pages(
	struct privcmd_dm_op_buf kbufs[], unsigned int num,
	struct page *pages[], unsigned int nr_pages, unsigned int *pinned)
{
	unsigned int i, off = 0;

	for (i = 0; i < num; ) {
		unsigned int requested;
		int page_count;

		requested = DIV_ROUND_UP(
			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
			PAGE_SIZE) - off;
		if (requested > nr_pages)
			return -ENOSPC;

		page_count = pin_user_pages_fast(
			(unsigned long)kbufs[i].uptr + off * PAGE_SIZE,
			requested, FOLL_WRITE, pages);
		if (page_count <= 0)
			return page_count ? : -EFAULT;

		*pinned += page_count;
		nr_pages -= page_count;
		pages += page_count;

		off = (requested == page_count) ? 0 : off + page_count;
		i += !off;
	}

	return 0;
}
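
/*
 * Worked example for the loop above (illustrative): a 6000-byte dm_op
 * buffer whose user pointer starts 512 bytes into a 4 KiB page spans
 * DIV_ROUND_UP(512 + 6000, 4096) = 2 pages.  If pin_user_pages_fast()
 * pins only the first of them, off becomes 1 and the next iteration
 * retries the same buffer asking for the single remaining page; only
 * once off wraps back to 0 does i advance to the next buffer.
 */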
static void unlock_pages(struct page *pages[], unsigned int nr_pages)
{
	unpin_user_pages_dirty_lock(pages, nr_pages, true);
}

static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_dm_op kdata;
	struct privcmd_dm_op_buf *kbufs;
	unsigned int nr_pages = 0;
	struct page **pages = NULL;
	struct xen_dm_op_buf *xbufs = NULL;
	unsigned int i;
	long rc;
	unsigned int pinned = 0;

	if (copy_from_user(&kdata, udata, sizeof(kdata)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
		return -EPERM;

	if (kdata.num == 0)
		return 0;

	if (kdata.num > privcmd_dm_op_max_num)
		return -E2BIG;

	kbufs = kcalloc(kdata.num, sizeof(*kbufs), GFP_KERNEL);
	if (!kbufs)
		return -ENOMEM;

	if (copy_from_user(kbufs, kdata.ubufs,
			   sizeof(*kbufs) * kdata.num)) {
		rc = -EFAULT;
		goto out;
	}

	for (i = 0; i < kdata.num; i++) {
		if (kbufs[i].size > privcmd_dm_op_buf_max_size) {
			rc = -E2BIG;
			goto out;
		}

		if (!access_ok(kbufs[i].uptr,
			       kbufs[i].size)) {
			rc = -EFAULT;
			goto out;
		}

		nr_pages += DIV_ROUND_UP(
			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
			PAGE_SIZE);
	}

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages) {
		rc = -ENOMEM;
		goto out;
	}

	xbufs = kcalloc(kdata.num, sizeof(*xbufs), GFP_KERNEL);
	if (!xbufs) {
		rc = -ENOMEM;
		goto out;
	}

	rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned);
	if (rc < 0)
		goto out;

	for (i = 0; i < kdata.num; i++) {
		set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
		xbufs[i].size = kbufs[i].size;
	}

	xen_preemptible_hcall_begin();
	rc = HYPERVISOR_dm_op(kdata.dom, kdata.num, xbufs);
	xen_preemptible_hcall_end();

out:
	unlock_pages(pages, pinned);
	kfree(xbufs);
	kfree(pages);
	kfree(kbufs);

	return rc;
}

static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	domid_t dom;

	if (copy_from_user(&dom, udata, sizeof(dom)))
		return -EFAULT;

	/* Set restriction to the specified domain, or check it matches */
	if (data->domid == DOMID_INVALID)
		data->domid = dom;
	else if (data->domid != dom)
		return -EINVAL;

	return 0;
}
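
/*
 * Illustrative sketch (not part of this driver): a device model such as
 * QEMU typically drops privilege right after connecting to its guest,
 * e.g.
 *
 *	domid_t domid = guest_domid;
 *	ioctl(privcmd_fd, IOCTL_PRIVCMD_RESTRICT, &domid);
 *
 * after which the fd can still issue dm_op and mapping calls naming that
 * domid, while arbitrary hypercalls and operations on any other domain
 * fail with -EPERM.
 */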
static long privcmd_ioctl_mmap_resource(struct file *file,
				struct privcmd_mmap_resource __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct privcmd_mmap_resource kdata;
	xen_pfn_t *pfns = NULL;
	struct xen_mem_acquire_resource xdata = { };
	int rc;

	if (copy_from_user(&kdata, udata, sizeof(kdata)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
		return -EPERM;

	/* Both fields must be set or unset */
	if (!!kdata.addr != !!kdata.num)
		return -EINVAL;

	xdata.domid = kdata.dom;
	xdata.type = kdata.type;
	xdata.id = kdata.id;

	if (!kdata.addr && !kdata.num) {
		/* Query the size of the resource. */
		rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
		if (rc)
			return rc;
		return __put_user(xdata.nr_frames, &udata->num);
	}

	mmap_write_lock(mm);

	vma = find_vma(mm, kdata.addr);
	if (!vma || vma->vm_ops != &privcmd_vm_ops) {
		rc = -EINVAL;
		goto out;
	}

	pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL | __GFP_NOWARN);
	if (!pfns) {
		rc = -ENOMEM;
		goto out;
	}

	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
	    xen_feature(XENFEAT_auto_translated_physmap)) {
		unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE);
		struct page **pages;
		unsigned int i;

		rc = alloc_empty_pages(vma, nr);
		if (rc < 0)
			goto out;

		pages = vma->vm_private_data;
		for (i = 0; i < kdata.num; i++) {
			xen_pfn_t pfn =
				page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);

			pfns[i] = pfn + (i % XEN_PFN_PER_PAGE);
		}
	} else
		vma->vm_private_data = PRIV_VMA_LOCKED;

	xdata.frame = kdata.idx;
	xdata.nr_frames = kdata.num;
	set_xen_guest_handle(xdata.frame_list, pfns);

	xen_preemptible_hcall_begin();
	rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
	xen_preemptible_hcall_end();

	if (rc)
		goto out;

	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
	    xen_feature(XENFEAT_auto_translated_physmap)) {
		rc = xen_remap_vma_range(vma, kdata.addr, kdata.num << PAGE_SHIFT);
	} else {
		unsigned int domid =
			(xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
			DOMID_SELF : kdata.dom;
		int num, *errs = (int *)pfns;

		BUILD_BUG_ON(sizeof(*errs) > sizeof(*pfns));
		num = xen_remap_domain_mfn_array(vma,
						 kdata.addr & PAGE_MASK,
						 pfns, kdata.num, errs,
						 vma->vm_page_prot,
						 domid);
		if (num < 0)
			rc = num;
		else if (num != kdata.num) {
			unsigned int i;

			for (i = 0; i < num; i++) {
				rc = errs[i];
				if (rc < 0)
					break;
			}
		} else
			rc = 0;
	}

out:
	mmap_write_unlock(mm);
	kfree(pfns);

	return rc;
}
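
/*
 * Illustrative sketch (not part of this driver): mapping a guest's ioreq
 * server pages for a device model looks roughly like
 *
 *	void *addr = mmap(NULL, nr * 4096, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, privcmd_fd, 0);
 *	struct privcmd_mmap_resource req = {
 *		.dom = domid, .type = XENMEM_resource_ioreq_server,
 *		.id = server_id, .idx = 0, .num = nr, .addr = (uintptr_t)addr,
 *	};
 *	rc = ioctl(privcmd_fd, IOCTL_PRIVCMD_MMAP_RESOURCE, &req);
 *
 * while passing num == 0 and addr == 0 only queries the resource size,
 * as handled at the top of the function above.
 */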
static long privcmd_ioctl(struct file *file,
			  unsigned int cmd, unsigned long data)
{
	int ret = -ENOTTY;
	void __user *udata = (void __user *) data;

	switch (cmd) {
	case IOCTL_PRIVCMD_HYPERCALL:
		ret = privcmd_ioctl_hypercall(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAP:
		ret = privcmd_ioctl_mmap(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH:
		ret = privcmd_ioctl_mmap_batch(file, udata, 1);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH_V2:
		ret = privcmd_ioctl_mmap_batch(file, udata, 2);
		break;

	case IOCTL_PRIVCMD_DM_OP:
		ret = privcmd_ioctl_dm_op(file, udata);
		break;

	case IOCTL_PRIVCMD_RESTRICT:
		ret = privcmd_ioctl_restrict(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAP_RESOURCE:
		ret = privcmd_ioctl_mmap_resource(file, udata);
		break;

	default:
		break;
	}

	return ret;
}

static int privcmd_open(struct inode *ino, struct file *file)
{
	struct privcmd_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	/* DOMID_INVALID implies no restriction */
	data->domid = DOMID_INVALID;

	file->private_data = data;
	return 0;
}

static int privcmd_release(struct inode *ino, struct file *file)
{
	struct privcmd_data *data = file->private_data;

	kfree(data);
	return 0;
}

static void privcmd_close(struct vm_area_struct *vma)
{
	struct page **pages = vma->vm_private_data;
	int numpgs = vma_pages(vma);
	int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
	int rc;

	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
		return;

	rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
	if (rc == 0)
		xen_free_unpopulated_pages(numpgs, pages);
	else
		pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
			numpgs, rc);
	kvfree(pages);
}

static vm_fault_t privcmd_fault(struct vm_fault *vmf)
{
	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
	       vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
	       vmf->pgoff, (void *)vmf->address);

	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct privcmd_vm_ops = {
	.close = privcmd_close,
	.fault = privcmd_fault
};

static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
	 * how to recreate these mappings */
	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTCOPY |
			  VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_ops = &privcmd_vm_ops;
	vma->vm_private_data = NULL;

	return 0;
}

/*
 * For MMAPBATCH*. This allows asserting the singleshot mapping
 * on a per pfn/pte basis. Mapping calls that fail with ENOENT
 * can be then retried until success.
 */
static int is_mapped_fn(pte_t *pte, unsigned long addr, void *data)
{
	return pte_none(ptep_get(pte)) ? 0 : -EBUSY;
}

static int privcmd_vma_range_is_mapped(
	       struct vm_area_struct *vma,
	       unsigned long addr,
	       unsigned long nr_pages)
{
	return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
				   is_mapped_fn, NULL) != 0;
}

const struct file_operations xen_privcmd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = privcmd_ioctl,
	.open = privcmd_open,
	.release = privcmd_release,
	.mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);

static struct miscdevice privcmd_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/privcmd",
	.fops = &xen_privcmd_fops,
};

static int __init privcmd_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	err = misc_register(&privcmd_dev);
	if (err != 0) {
		pr_err("Could not register Xen privcmd device\n");
		return err;
	}

	err = misc_register(&xen_privcmdbuf_dev);
	if (err != 0) {
		pr_err("Could not register Xen hypercall-buf device\n");
		misc_deregister(&privcmd_dev);
		return err;
	}

	return 0;
}

static void __exit privcmd_exit(void)
{
	misc_deregister(&privcmd_dev);
	misc_deregister(&xen_privcmdbuf_dev);
}

module_init(privcmd_init);
module_exit(privcmd_exit);