/*
 * KVMGT - the implementation of Intel mediated pass-through framework for KVM
 *
 * Copyright(c) 2014-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Jike Song <jike.song@intel.com>
 *    Xiaoguang Chen <xiaoguang.chen@intel.com>
 */
#include <linux/init.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/uuid.h>
#include <linux/kvm_host.h>
#include <linux/vfio.h>
#include <linux/mdev.h>
#include <linux/debugfs.h>

#include "i915_drv.h"
#include "gvt.h"
static const struct intel_gvt_ops *intel_gvt_ops;

/* helper macros copied from vfio-pci */
#define VFIO_PCI_OFFSET_SHIFT   40
#define VFIO_PCI_OFFSET_TO_INDEX(off)   (off >> VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK    (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
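/*
 * As in vfio-pci, the upper bits of the file offset passed to read/write/
 * mmap select the region index and the low 40 bits are the offset within
 * that region; for example, offset (2ULL << 40) + 0x10 addresses byte 0x10
 * of region index 2 (VFIO_PCI_BAR2_REGION_INDEX).
 */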
#define OPREGION_SIGNATURE "IntelGraphicsMem"

struct vfio_region;
struct intel_vgpu_regops {
	size_t (*rw)(struct intel_vgpu *vgpu, char *buf,
			size_t count, loff_t *ppos, bool iswrite);
	void (*release)(struct intel_vgpu *vgpu,
			struct vfio_region *region);
};

struct vfio_region {
	u32				type;
	u32				subtype;
	size_t				size;
	u32				flags;
	const struct intel_vgpu_regops	*ops;
	void				*data;
};

struct kvmgt_pgfn {
	gfn_t gfn;
	struct hlist_node hnode;
};
struct kvmgt_guest_info {
	struct kvm *kvm;
	struct intel_vgpu *vgpu;
	struct kvm_page_track_notifier_node track_node;
#define NR_BKT (1 << 18)
	struct hlist_head ptable[NR_BKT];
#undef NR_BKT
	struct dentry *debugfs_cache_entries;
};
struct gvt_dma {
	struct intel_vgpu *vgpu;
	struct rb_node gfn_node;
	struct rb_node dma_addr_node;
	gfn_t gfn;
	dma_addr_t dma_addr;
	unsigned long size;
	struct kref ref;
};
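/*
 * A vGPU handle is simply the kernel address of its kvmgt_guest_info, so
 * any value with bits set above the low byte is treated as a live handle.
 */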
static inline bool handle_valid(unsigned long handle)
{
	return !!(handle & ~0xff);
}
static int kvmgt_guest_init(struct mdev_device *mdev);
static void intel_vgpu_release_work(struct work_struct *work);
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
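/*
 * Drop the vfio pins taken by gvt_pin_guest_page() for the @size bytes of
 * guest memory starting at @gfn, one page at a time.
 */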
static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
		unsigned long size)
{
	unsigned long total_pages, npage;
	int ret;

	total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;

	for (npage = 0; npage < total_pages; npage++) {
		unsigned long cur_gfn = gfn + npage;

		ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1);
		WARN_ON(ret != 1);
	}
}
/* Pin a normal or compound guest page for dma. */
static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
		unsigned long size, struct page **page)
{
	unsigned long base_pfn = 0;
	int total_pages, npage, ret;

	total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
	/*
	 * We pin the pages one by one to avoid allocating a big array
	 * on the stack to hold the pfns.
	 */
	for (npage = 0; npage < total_pages; npage++) {
		unsigned long cur_gfn = gfn + npage;
		unsigned long pfn;

		ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1,
				     IOMMU_READ | IOMMU_WRITE, &pfn);
		if (ret != 1) {
			gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
				     cur_gfn, ret);
			goto err;
		}

		if (!pfn_valid(pfn)) {
			gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
			ret = -EFAULT;
			goto err;
		}

		if (npage == 0)
			base_pfn = pfn;
		else if (base_pfn + npage != pfn) {
			gvt_vgpu_err("The pages are not contiguous\n");
			ret = -EINVAL;
			goto err;
		}
	}

	*page = pfn_to_page(base_pfn);
	return 0;
err:
	gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
	return ret;
}
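/*
 * Pin the guest pages backing @gfn and set up a DMA mapping for them, so
 * the resulting bus address can be written into shadow GGTT/PPGTT entries.
 */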
static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
		dma_addr_t *dma_addr, unsigned long size)
{
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	struct page *page = NULL;

	ret = gvt_pin_guest_page(vgpu, gfn, size, &page);

	if (!pfn_valid(pfn)) {
		gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
		vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);

	/* Setup DMA mapping. */
	*dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
	ret = dma_mapping_error(dev, *dma_addr);
		gvt_vgpu_err("DMA mapping failed for pfn 0x%lx, ret %d\n",
			     page_to_pfn(page), ret);
		gvt_unpin_guest_page(vgpu, gfn, size);

static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
		dma_addr_t dma_addr, unsigned long size)
{
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;

	dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	gvt_unpin_guest_page(vgpu, gfn, size);
}
static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
		dma_addr_t dma_addr)
{
	struct rb_node *node = vgpu->vdev.dma_addr_cache.rb_node;
	struct gvt_dma *itr;

	while (node) {
		itr = rb_entry(node, struct gvt_dma, dma_addr_node);

		if (dma_addr < itr->dma_addr)
			node = node->rb_left;
		else if (dma_addr > itr->dma_addr)
			node = node->rb_right;
		else
			return itr;
	}
	return NULL;
}

static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
{
	struct rb_node *node = vgpu->vdev.gfn_cache.rb_node;
	struct gvt_dma *itr;

	while (node) {
		itr = rb_entry(node, struct gvt_dma, gfn_node);

		if (gfn < itr->gfn)
			node = node->rb_left;
		else if (gfn > itr->gfn)
			node = node->rb_right;
		else
			return itr;
	}
	return NULL;
}
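/*
 * Each pinned range is tracked by a gvt_dma entry that sits in two rbtrees
 * at once: gfn_cache keyed by guest frame number and dma_addr_cache keyed
 * by the mapped DMA address, so lookups work in both directions.
 */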
static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
		dma_addr_t dma_addr, unsigned long size)
{
	struct gvt_dma *new, *itr;
	struct rb_node **link, *parent = NULL;

	new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->vgpu = vgpu;
	new->gfn = gfn;
	new->dma_addr = dma_addr;
	new->size = size;
	kref_init(&new->ref);

	/* gfn_cache maps gfn to struct gvt_dma. */
	link = &vgpu->vdev.gfn_cache.rb_node;
	while (*link) {
		parent = *link;
		itr = rb_entry(parent, struct gvt_dma, gfn_node);

		if (gfn < itr->gfn)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	rb_link_node(&new->gfn_node, parent, link);
	rb_insert_color(&new->gfn_node, &vgpu->vdev.gfn_cache);

	/* dma_addr_cache maps dma addr to struct gvt_dma. */
	parent = NULL;
	link = &vgpu->vdev.dma_addr_cache.rb_node;
	while (*link) {
		parent = *link;
		itr = rb_entry(parent, struct gvt_dma, dma_addr_node);

		if (dma_addr < itr->dma_addr)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	rb_link_node(&new->dma_addr_node, parent, link);
	rb_insert_color(&new->dma_addr_node, &vgpu->vdev.dma_addr_cache);

	vgpu->vdev.nr_cache_entries++;
	return 0;
}

static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
		struct gvt_dma *entry)
{
	rb_erase(&entry->gfn_node, &vgpu->vdev.gfn_cache);
	rb_erase(&entry->dma_addr_node, &vgpu->vdev.dma_addr_cache);

	vgpu->vdev.nr_cache_entries--;
}

static void gvt_cache_destroy(struct intel_vgpu *vgpu)
{
	struct gvt_dma *dma;
	struct rb_node *node = NULL;

	for (;;) {
		mutex_lock(&vgpu->vdev.cache_lock);
		node = rb_first(&vgpu->vdev.gfn_cache);
		if (!node) {
			mutex_unlock(&vgpu->vdev.cache_lock);
			break;
		}
		dma = rb_entry(node, struct gvt_dma, gfn_node);
		gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
		__gvt_cache_remove_entry(vgpu, dma);
		mutex_unlock(&vgpu->vdev.cache_lock);
	}
}

static void gvt_cache_init(struct intel_vgpu *vgpu)
{
	vgpu->vdev.gfn_cache = RB_ROOT;
	vgpu->vdev.dma_addr_cache = RB_ROOT;
	vgpu->vdev.nr_cache_entries = 0;
	mutex_init(&vgpu->vdev.cache_lock);
}
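/*
 * The protect table is a hash of gfns whose pages are currently
 * write-protected through KVM page tracking, so the write-protect handler
 * can quickly tell whether a guest write hits a tracked page-table page.
 */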
static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
{
	hash_init(info->ptable);
}

static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info)
{
	struct kvmgt_pgfn *p;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(info->ptable, i, tmp, p, hnode) {
		hash_del(&p->hnode);
		kfree(p);
	}
}

static struct kvmgt_pgfn *
__kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
{
	struct kvmgt_pgfn *p, *res = NULL;

	hash_for_each_possible(info->ptable, p, hnode, gfn) {
		if (gfn == p->gfn) {
			res = p;
			break;
		}
	}
	return res;
}

static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info,
		gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	p = __kvmgt_protect_table_find(info, gfn);
	return !!p;
}

static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	if (kvmgt_gfn_is_write_protected(info, gfn))
		return;

	p = kzalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
	if (WARN(!p, "gfn: 0x%llx\n", gfn))
		return;

	p->gfn = gfn;
	hash_add(info->ptable, &p->hnode, gfn);
}

static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
		gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	p = __kvmgt_protect_table_find(info, gfn);
	if (p) {
		hash_del(&p->hnode);
		kfree(p);
	}
}

static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
		size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
			VFIO_PCI_NUM_REGIONS;
	void *base = vgpu->vdev.region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;

	if (pos >= vgpu->vdev.region[i].size || iswrite) {
		gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
		return -EINVAL;
	}
	count = min(count, (size_t)(vgpu->vdev.region[i].size - pos));
	memcpy(buf, base + pos, count);

	return count;
}

static void intel_vgpu_reg_release_opregion(struct intel_vgpu *vgpu,
		struct vfio_region *region)
{
}

static const struct intel_vgpu_regops intel_vgpu_regops_opregion = {
	.rw = intel_vgpu_reg_rw_opregion,
	.release = intel_vgpu_reg_release_opregion,
};
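/*
 * Grow the array of device-specific regions that this vGPU exposes beyond
 * the standard VFIO PCI regions; the extra regions are reported to
 * userspace via VFIO_DEVICE_GET_REGION_INFO with a type/subtype capability.
 */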
static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
		unsigned int type, unsigned int subtype,
		const struct intel_vgpu_regops *ops,
		size_t size, u32 flags, void *data)
{
	struct vfio_region *region;

	region = krealloc(vgpu->vdev.region,
			(vgpu->vdev.num_regions + 1) * sizeof(*region),
			GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	vgpu->vdev.region = region;
	vgpu->vdev.region[vgpu->vdev.num_regions].type = type;
	vgpu->vdev.region[vgpu->vdev.num_regions].subtype = subtype;
	vgpu->vdev.region[vgpu->vdev.num_regions].ops = ops;
	vgpu->vdev.region[vgpu->vdev.num_regions].size = size;
	vgpu->vdev.region[vgpu->vdev.num_regions].flags = flags;
	vgpu->vdev.region[vgpu->vdev.num_regions].data = data;
	vgpu->vdev.num_regions++;
	return 0;
}

static int kvmgt_get_vfio_device(void *p_vgpu)
{
	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;

	vgpu->vdev.vfio_device = vfio_device_get_from_dev(
		mdev_dev(vgpu->vdev.mdev));
	if (!vgpu->vdev.vfio_device) {
		gvt_vgpu_err("failed to get vfio device\n");
		return -ENODEV;
	}
	return 0;
}

static int kvmgt_set_opregion(void *p_vgpu)
{
	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;

	/*
	 * Each vgpu has its own opregion, even though VFIO will create
	 * another one later. This one is used to expose the opregion to
	 * VFIO, while the one VFIO creates later is the one the guest
	 * actually uses.
	 */
	base = vgpu_opregion(vgpu)->va;

	if (memcmp(base, OPREGION_SIGNATURE, 16)) {

	ret = intel_vgpu_register_reg(vgpu,
			PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
			VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
			&intel_vgpu_regops_opregion, OPREGION_SIZE,
			VFIO_REGION_INFO_FLAG_READ, base);

static void kvmgt_put_vfio_device(void *vgpu)
{
	if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device))
		return;

	vfio_device_put(((struct intel_vgpu *)vgpu)->vdev.vfio_device);
}
static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = NULL;
	struct intel_vgpu_type *type;

	pdev = mdev_parent_dev(mdev);
	gvt = kdev_to_i915(pdev)->gvt;

	type = intel_gvt_ops->gvt_find_vgpu_type(gvt, kobject_name(kobj));
	if (!type) {
		gvt_vgpu_err("failed to find type %s to create\n",
			     kobject_name(kobj));

	vgpu = intel_gvt_ops->vgpu_create(gvt, type);
	if (IS_ERR_OR_NULL(vgpu)) {
		ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
		gvt_err("failed to create intel vgpu: %d\n", ret);

	INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);

	vgpu->vdev.mdev = mdev;
	mdev_set_drvdata(mdev, vgpu);

	gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
		     dev_name(mdev_dev(mdev)));

static int intel_vgpu_remove(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	if (handle_valid(vgpu->handle))
		return -EBUSY;

	intel_gvt_ops->vgpu_destroy(vgpu);
	return 0;
}
static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
		unsigned long action, void *data)
{
	struct intel_vgpu *vgpu = container_of(nb,
					struct intel_vgpu,
					vdev.iommu_notifier);

	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		struct vfio_iommu_type1_dma_unmap *unmap = data;
		struct gvt_dma *entry;
		unsigned long iov_pfn, end_iov_pfn;

		iov_pfn = unmap->iova >> PAGE_SHIFT;
		end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE;

		mutex_lock(&vgpu->vdev.cache_lock);
		for (; iov_pfn < end_iov_pfn; iov_pfn++) {
			entry = __gvt_cache_find_gfn(vgpu, iov_pfn);

			gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
					   entry->size);
			__gvt_cache_remove_entry(vgpu, entry);
		}
		mutex_unlock(&vgpu->vdev.cache_lock);
	}

	return NOTIFY_OK;
}

static int intel_vgpu_group_notifier(struct notifier_block *nb,
		unsigned long action, void *data)
{
	struct intel_vgpu *vgpu = container_of(nb,
					struct intel_vgpu,
					vdev.group_notifier);

	/* the only action we care about */
	if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
		vgpu->vdev.kvm = data;

		if (!data)
			schedule_work(&vgpu->vdev.release_work);
	}

	return NOTIFY_OK;
}

static int intel_vgpu_open(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned long events;

	vgpu->vdev.iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
	vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier;

	events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
				     &vgpu->vdev.iommu_notifier);
		gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
			     ret);

	events = VFIO_GROUP_NOTIFY_SET_KVM;
	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
				     &vgpu->vdev.group_notifier);
		gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
			     ret);

	ret = kvmgt_guest_init(mdev);

	intel_gvt_ops->vgpu_activate(vgpu);

	atomic_set(&vgpu->vdev.released, 0);

	vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
				 &vgpu->vdev.group_notifier);

	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
				 &vgpu->vdev.iommu_notifier);
static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
{
	struct eventfd_ctx *trigger;

	trigger = vgpu->vdev.msi_trigger;
	if (trigger) {
		eventfd_ctx_put(trigger);
		vgpu->vdev.msi_trigger = NULL;
	}
}

static void __intel_vgpu_release(struct intel_vgpu *vgpu)
{
	struct kvmgt_guest_info *info;

	if (!handle_valid(vgpu->handle))
		return;

	if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
		return;

	intel_gvt_ops->vgpu_deactivate(vgpu);

	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
				       &vgpu->vdev.iommu_notifier);
	WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);

	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY,
				       &vgpu->vdev.group_notifier);
	WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);

	info = (struct kvmgt_guest_info *)vgpu->handle;
	kvmgt_guest_exit(info);

	intel_vgpu_release_msi_eventfd_ctx(vgpu);

	vgpu->vdev.kvm = NULL;
}

static void intel_vgpu_release(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	__intel_vgpu_release(vgpu);
}

static void intel_vgpu_release_work(struct work_struct *work)
{
	struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
					       vdev.release_work);

	__intel_vgpu_release(vgpu);
}
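/*
 * Read the guest-programmed base address of a BAR back out of the vGPU's
 * virtual config space, combining the high dword when the BAR is 64-bit.
 */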
static uint64_t intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
{
	u32 start_lo, start_hi;
	u32 mem_type;

	start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_MASK;
	mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_TYPE_MASK;

	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
					+ bar + 4));
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
	default:
		/* mem unknown type treated as 32-bit BAR */
		start_hi = 0;
		break;
	}

	return ((u64)start_hi << 32) | start_lo;
}

static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
		void *buf, unsigned int count, bool is_write)
{
	uint64_t bar_start = intel_vgpu_get_bar_addr(vgpu, bar);

	if (is_write)
		ret = intel_gvt_ops->emulate_mmio_write(vgpu,
				bar_start + off, buf, count);
	else
		ret = intel_gvt_ops->emulate_mmio_read(vgpu,
				bar_start + off, buf, count);
	return ret;
}

static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, uint64_t off)
{
	return off >= vgpu_aperture_offset(vgpu) &&
	       off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
}

static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t off,
		void *buf, unsigned long count, bool is_write)
{
	void *aperture_va;

	if (!intel_vgpu_in_aperture(vgpu, off) ||
	    !intel_vgpu_in_aperture(vgpu, off + count)) {
		gvt_vgpu_err("Invalid aperture offset %llu\n", off);
		return -EINVAL;
	}

	aperture_va = io_mapping_map_wc(&vgpu->gvt->dev_priv->ggtt.iomap,
					ALIGN_DOWN(off, PAGE_SIZE),
					count + offset_in_page(off));

	if (is_write)
		memcpy(aperture_va + offset_in_page(off), buf, count);
	else
		memcpy(buf, aperture_va + offset_in_page(off), count);

	io_mapping_unmap(aperture_va);

	return 0;
}
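/*
 * Dispatch a read or write on the mdev to the right emulation path based on
 * the region index encoded in *ppos: config space, BAR0 MMIO, the BAR2
 * aperture, or one of the extra device-specific regions.
 */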
static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
		size_t count, loff_t *ppos, bool is_write)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	uint64_t pos = *ppos & VFIO_PCI_OFFSET_MASK;

	if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions) {
		gvt_vgpu_err("invalid index: %u\n", index);
		return -EINVAL;
	}

	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		if (is_write)
			ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos,
						buf, count);
		else
			ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos,
						buf, count);
		break;
	case VFIO_PCI_BAR0_REGION_INDEX:
		ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_0, pos,
					buf, count, is_write);
		break;
	case VFIO_PCI_BAR2_REGION_INDEX:
		ret = intel_vgpu_aperture_rw(vgpu, pos, buf, count, is_write);
		break;
	case VFIO_PCI_BAR1_REGION_INDEX:
	case VFIO_PCI_BAR3_REGION_INDEX:
	case VFIO_PCI_BAR4_REGION_INDEX:
	case VFIO_PCI_BAR5_REGION_INDEX:
	case VFIO_PCI_VGA_REGION_INDEX:
	case VFIO_PCI_ROM_REGION_INDEX:
		break;
	default:
		if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions)
			return -EINVAL;

		index -= VFIO_PCI_NUM_REGIONS;
		return vgpu->vdev.region[index].ops->rw(vgpu, buf, count,
				ppos, is_write);
	}

	return ret == 0 ? count : ret;
}
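/*
 * Check whether an access through BAR0 falls inside the GGTT entry range,
 * so that 64-bit GGTT entry reads and writes can be handled as a single
 * access instead of being split into 32-bit halves.
 */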
static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	struct intel_gvt *gvt = vgpu->gvt;

	/* Only allow MMIO GGTT entry access */
	if (index != PCI_BASE_ADDRESS_0)
		return false;

	offset = (u64)(*ppos & VFIO_PCI_OFFSET_MASK) -
		intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);

	return (offset >= gvt->device_info.gtt_start_offset &&
		offset < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt)) ?
			true : false;
}

static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
		size_t count, loff_t *ppos)
{
	unsigned int done = 0;

	/* Only support 8-byte GGTT entry reads */
	if (count >= 8 && !(*ppos % 8) &&
	    gtt_entry(mdev, ppos)) {

		ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
				ppos, false);

		if (copy_to_user(buf, &val, sizeof(val)))

	} else if (count >= 4 && !(*ppos % 4)) {

		ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
				ppos, false);

		if (copy_to_user(buf, &val, sizeof(val)))

	} else if (count >= 2 && !(*ppos % 2)) {

		ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
				ppos, false);

		if (copy_to_user(buf, &val, sizeof(val)))

		ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos,
				false);

		if (copy_to_user(buf, &val, sizeof(val)))
static ssize_t intel_vgpu_write(struct mdev_device *mdev,
		const char __user *buf,
		size_t count, loff_t *ppos)
{
	unsigned int done = 0;

	/* Only support 8-byte GGTT entry writes */
	if (count >= 8 && !(*ppos % 8) &&
	    gtt_entry(mdev, ppos)) {

		if (copy_from_user(&val, buf, sizeof(val)))

		ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
				ppos, true);

	} else if (count >= 4 && !(*ppos % 4)) {

		if (copy_from_user(&val, buf, sizeof(val)))

		ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
				ppos, true);

	} else if (count >= 2 && !(*ppos % 2)) {

		if (copy_from_user(&val, buf, sizeof(val)))

		ret = intel_vgpu_rw(mdev, (char *)&val,
				sizeof(val), ppos, true);

		if (copy_from_user(&val, buf, sizeof(val)))

		ret = intel_vgpu_rw(mdev, &val, sizeof(val),
				ppos, true);
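/*
 * mmap is only supported for the BAR2 (aperture) region; the vGPU's slice
 * of the aperture is remapped into the caller's address space with
 * remap_pfn_range().
 */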
static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
{
	unsigned long req_size, pgoff = 0;
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
	if (index >= VFIO_PCI_ROM_REGION_INDEX)
		return -EINVAL;

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (index != VFIO_PCI_BAR2_REGION_INDEX)
		return -EINVAL;

	pg_prot = vma->vm_page_prot;
	virtaddr = vma->vm_start;
	req_size = vma->vm_end - vma->vm_start;
	pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;

	return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
}
static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
{
	if (type == VFIO_PCI_INTX_IRQ_INDEX || type == VFIO_PCI_MSI_IRQ_INDEX)
		return 1;

	return 0;
}

static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
		unsigned int index, unsigned int start,
		unsigned int count, uint32_t flags,
		void *data)
{
	return 0;
}

static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
		unsigned int index, unsigned int start,
		unsigned int count, uint32_t flags, void *data)
{
	return 0;
}

static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
		unsigned int index, unsigned int start, unsigned int count,
		uint32_t flags, void *data)
{
	return 0;
}

static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
		unsigned int index, unsigned int start, unsigned int count,
		uint32_t flags, void *data)
{
	struct eventfd_ctx *trigger;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int fd = *(int *)data;

		trigger = eventfd_ctx_fdget(fd);
		if (IS_ERR(trigger)) {
			gvt_vgpu_err("eventfd_ctx_fdget failed\n");
			return PTR_ERR(trigger);
		}
		vgpu->vdev.msi_trigger = trigger;
	} else if ((flags & VFIO_IRQ_SET_DATA_NONE) && !count)
		intel_vgpu_release_msi_eventfd_ctx(vgpu);

	return 0;
}

static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, uint32_t flags,
		unsigned int index, unsigned int start, unsigned int count,
		void *data)
{
	int (*func)(struct intel_vgpu *vgpu, unsigned int index,
			unsigned int start, unsigned int count, uint32_t flags,
			void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = intel_vgpu_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = intel_vgpu_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = intel_vgpu_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = intel_vgpu_set_msi_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vgpu, index, start, count, flags, data);
}
static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
			     unsigned long arg)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned long minsz;

	gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_DEVICE_FLAGS_PCI;
		info.flags |= VFIO_DEVICE_FLAGS_RESET;
		info.num_regions = VFIO_PCI_NUM_REGIONS +
				vgpu->vdev.num_regions;
		info.num_irqs = VFIO_PCI_NUM_IRQS;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct vfio_region_info info;
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		struct vfio_region_info_cap_sparse_mmap *sparse = NULL;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_CONFIG_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vgpu->gvt->device_info.cfg_space_size;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR0_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vgpu->cfg_space.bar[info.index].size;

			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR1_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			break;
		case VFIO_PCI_BAR2_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.flags = VFIO_REGION_INFO_FLAG_CAPS |
				     VFIO_REGION_INFO_FLAG_MMAP |
				     VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			info.size = gvt_aperture_sz(vgpu->gvt);

			size = sizeof(*sparse) +
			       (nr_areas * sizeof(*sparse->areas));
			sparse = kzalloc(size, GFP_KERNEL);

			sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
			sparse->header.version = 1;
			sparse->nr_areas = nr_areas;
			cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
			sparse->areas[0].offset =
				PAGE_ALIGN(vgpu_aperture_offset(vgpu));
			sparse->areas[0].size = vgpu_aperture_sz(vgpu);
			break;
		case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);

			gvt_dbg_core("get region info bar:%d\n", info.index);
			break;
		case VFIO_PCI_ROM_REGION_INDEX:
		case VFIO_PCI_VGA_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);

			gvt_dbg_core("get region info index:%d\n", info.index);
			break;
		default: {
			struct vfio_region_info_cap_type cap_type = {
				.header.id = VFIO_REGION_INFO_CAP_TYPE,
				.header.version = 1 };

			if (info.index >= VFIO_PCI_NUM_REGIONS +
					vgpu->vdev.num_regions)
				return -EINVAL;

			i = info.index - VFIO_PCI_NUM_REGIONS;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vgpu->vdev.region[i].size;
			info.flags = vgpu->vdev.region[i].flags;

			cap_type.type = vgpu->vdev.region[i].type;
			cap_type.subtype = vgpu->vdev.region[i].subtype;

			ret = vfio_info_add_capability(&caps,
					&cap_type.header, sizeof(cap_type));
		}
		}

		if ((info.flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) {
			switch (cap_type_id) {
			case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
				ret = vfio_info_add_capability(&caps,
					&sparse->header, sizeof(*sparse) +
					(sparse->nr_areas *
						sizeof(*sparse->areas)));
			}
		}

		if (caps.size) {
			info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
			if (info.argsz < sizeof(info) + caps.size) {
				info.argsz = sizeof(info) + caps.size;
				info.cap_offset = 0;
			} else {
				vfio_info_cap_shift(&caps, sizeof(info));
				if (copy_to_user((void __user *)arg +
						 sizeof(info), caps.buf,
						 caps.size))
					return -EFAULT;
				info.cap_offset = sizeof(info);
			}
		}

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_INTX_IRQ_INDEX:
		case VFIO_PCI_MSI_IRQ_INDEX:
			break;
		default:
			return -EINVAL;
		}

		info.flags = VFIO_IRQ_INFO_EVENTFD;

		info.count = intel_vgpu_get_irq_count(vgpu, info.index);

		if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
			info.flags |= (VFIO_IRQ_INFO_MASKABLE |
				       VFIO_IRQ_INFO_AUTOMASKED);
		else
			info.flags |= VFIO_IRQ_INFO_NORESIZE;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;

		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
			int max = intel_vgpu_get_irq_count(vgpu, hdr.index);

			ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
						VFIO_PCI_NUM_IRQS, &data_size);
			if (ret) {
				gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
				return -EINVAL;
			}

			data = memdup_user((void __user *)(arg + minsz),
					   data_size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index,
					  hdr.start, hdr.count, data);
		kfree(data);

		return ret;
	} else if (cmd == VFIO_DEVICE_RESET) {
		intel_gvt_ops->vgpu_reset(vgpu);
		return 0;
	} else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) {
		struct vfio_device_gfx_plane_info dmabuf;

		minsz = offsetofend(struct vfio_device_gfx_plane_info,
				    dmabuf_id);
		if (copy_from_user(&dmabuf, (void __user *)arg, minsz))
			return -EFAULT;
		if (dmabuf.argsz < minsz)
			return -EINVAL;

		ret = intel_gvt_ops->vgpu_query_plane(vgpu, &dmabuf);
		if (ret != 0)
			return ret;

		return copy_to_user((void __user *)arg, &dmabuf, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) {

		if (get_user(dmabuf_id, (__u32 __user *)arg))
			return -EFAULT;

		dmabuf_fd = intel_gvt_ops->vgpu_get_dmabuf(vgpu, dmabuf_id);
		return dmabuf_fd;
	}

	return -ENOTTY;
}
static ssize_t
vgpu_id_show(struct device *dev, struct device_attribute *attr,
	     char *buf)
{
	struct mdev_device *mdev = mdev_from_dev(dev);

	if (mdev) {
		struct intel_vgpu *vgpu = (struct intel_vgpu *)
			mdev_get_drvdata(mdev);
		return sprintf(buf, "%d\n", vgpu->id);
	}
	return sprintf(buf, "\n");
}

static ssize_t
hw_id_show(struct device *dev, struct device_attribute *attr,
	   char *buf)
{
	struct mdev_device *mdev = mdev_from_dev(dev);

	if (mdev) {
		struct intel_vgpu *vgpu = (struct intel_vgpu *)
			mdev_get_drvdata(mdev);
		return sprintf(buf, "%u\n",
			       vgpu->submission.shadow_ctx->hw_id);
	}
	return sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(vgpu_id);
static DEVICE_ATTR_RO(hw_id);

static struct attribute *intel_vgpu_attrs[] = {
	&dev_attr_vgpu_id.attr,
	&dev_attr_hw_id.attr,
	NULL
};

static const struct attribute_group intel_vgpu_group = {
	.name = "intel_vgpu",
	.attrs = intel_vgpu_attrs,
};

static const struct attribute_group *intel_vgpu_groups[] = {
	&intel_vgpu_group,
	NULL,
};

static struct mdev_parent_ops intel_vgpu_ops = {
	.mdev_attr_groups = intel_vgpu_groups,
	.create = intel_vgpu_create,
	.remove = intel_vgpu_remove,

	.open = intel_vgpu_open,
	.release = intel_vgpu_release,

	.read = intel_vgpu_read,
	.write = intel_vgpu_write,
	.mmap = intel_vgpu_mmap,
	.ioctl = intel_vgpu_ioctl,
};
static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
{
	struct attribute **kvm_type_attrs;
	struct attribute_group **kvm_vgpu_type_groups;

	intel_gvt_ops = ops;
	if (!intel_gvt_ops->get_gvt_attrs(&kvm_type_attrs,
					  &kvm_vgpu_type_groups))
		return -EFAULT;
	intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups;

	return mdev_register_device(dev, &intel_vgpu_ops);
}

static void kvmgt_host_exit(struct device *dev, void *gvt)
{
	mdev_unregister_device(dev);
}
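/*
 * Page-track add/remove toggle KVM write tracking on a single gfn so that
 * guest writes to shadowed page-table pages trap into the host and reach
 * kvmgt_page_track_write() below.
 */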
static int kvmgt_page_track_add(unsigned long handle, u64 gfn)
{
	struct kvmgt_guest_info *info;
	struct kvm *kvm;
	struct kvm_memory_slot *slot;
	int idx;

	if (!handle_valid(handle))
		return -ESRCH;

	info = (struct kvmgt_guest_info *)handle;
	kvm = info->kvm;

	idx = srcu_read_lock(&kvm->srcu);
	slot = gfn_to_memslot(kvm, gfn);
	if (!slot) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -EINVAL;
	}

	spin_lock(&kvm->mmu_lock);

	if (kvmgt_gfn_is_write_protected(info, gfn))
		goto out;

	kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
	kvmgt_protect_table_add(info, gfn);

out:
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
	return 0;
}

static int kvmgt_page_track_remove(unsigned long handle, u64 gfn)
{
	struct kvmgt_guest_info *info;
	struct kvm *kvm;
	struct kvm_memory_slot *slot;
	int idx;

	if (!handle_valid(handle))
		return -ESRCH;

	info = (struct kvmgt_guest_info *)handle;
	kvm = info->kvm;

	idx = srcu_read_lock(&kvm->srcu);
	slot = gfn_to_memslot(kvm, gfn);
	if (!slot) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -EINVAL;
	}

	spin_lock(&kvm->mmu_lock);

	if (!kvmgt_gfn_is_write_protected(info, gfn))
		goto out;

	kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
	kvmgt_protect_table_del(info, gfn);

out:
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
	return 0;
}

static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		const u8 *val, int len,
		struct kvm_page_track_notifier_node *node)
{
	struct kvmgt_guest_info *info = container_of(node,
					struct kvmgt_guest_info, track_node);

	if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
		intel_gvt_ops->write_protect_handler(info->vgpu, gpa,
						     (void *)val, len);
}

static void kvmgt_page_track_flush_slot(struct kvm *kvm,
		struct kvm_memory_slot *slot,
		struct kvm_page_track_notifier_node *node)
{
	int i;
	gfn_t gfn;
	struct kvmgt_guest_info *info = container_of(node,
					struct kvmgt_guest_info, track_node);

	spin_lock(&kvm->mmu_lock);
	for (i = 0; i < slot->npages; i++) {
		gfn = slot->base_gfn + i;
		if (kvmgt_gfn_is_write_protected(info, gfn)) {
			kvm_slot_page_track_remove_page(kvm, slot, gfn,
						KVM_PAGE_TRACK_WRITE);
			kvmgt_protect_table_del(info, gfn);
		}
	}
	spin_unlock(&kvm->mmu_lock);
}
static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
{
	struct intel_vgpu *itr;
	struct kvmgt_guest_info *info;
	int id;
	bool ret = false;

	mutex_lock(&vgpu->gvt->lock);
	for_each_active_vgpu(vgpu->gvt, itr, id) {
		if (!handle_valid(itr->handle))
			continue;

		info = (struct kvmgt_guest_info *)itr->handle;
		if (kvm && kvm == info->kvm) {
			ret = true;
			break;
		}
	}
	mutex_unlock(&vgpu->gvt->lock);
	return ret;
}

static int kvmgt_guest_init(struct mdev_device *mdev)
{
	struct kvmgt_guest_info *info;
	struct intel_vgpu *vgpu;
	struct kvm *kvm;

	vgpu = mdev_get_drvdata(mdev);
	if (handle_valid(vgpu->handle))
		return -EEXIST;

	kvm = vgpu->vdev.kvm;
	if (!kvm || kvm->mm != current->mm) {
		gvt_vgpu_err("KVM is required to use Intel vGPU\n");
		return -ESRCH;
	}

	if (__kvmgt_vgpu_exist(vgpu, kvm))
		return -EEXIST;

	info = vzalloc(sizeof(struct kvmgt_guest_info));
	if (!info)
		return -ENOMEM;

	vgpu->handle = (unsigned long)info;
	info->vgpu = vgpu;
	info->kvm = kvm;
	kvm_get_kvm(info->kvm);

	kvmgt_protect_table_init(info);
	gvt_cache_init(vgpu);

	mutex_init(&vgpu->dmabuf_lock);
	init_completion(&vgpu->vblank_done);

	info->track_node.track_write = kvmgt_page_track_write;
	info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
	kvm_page_track_register_notifier(kvm, &info->track_node);

	info->debugfs_cache_entries = debugfs_create_ulong(
				"kvmgt_nr_cache_entries",
				0444, vgpu->debugfs,
				&vgpu->vdev.nr_cache_entries);
	if (!info->debugfs_cache_entries)
		gvt_vgpu_err("Cannot create kvmgt debugfs entry\n");

	return 0;
}

static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
{
	debugfs_remove(info->debugfs_cache_entries);

	kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
	kvm_put_kvm(info->kvm);
	kvmgt_protect_table_destroy(info);
	gvt_cache_destroy(info->vgpu);

	return true;
}
static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
{
	/* nothing to do here */
	return 0;
}

static void kvmgt_detach_vgpu(unsigned long handle)
{
	/* nothing to do here */
}

static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
{
	struct kvmgt_guest_info *info;
	struct intel_vgpu *vgpu;

	if (!handle_valid(handle))
		return -ESRCH;

	info = (struct kvmgt_guest_info *)handle;
	vgpu = info->vgpu;

	/*
	 * When the guest powers off, msi_trigger is set to NULL, but the
	 * vgpu's config space and MMIO registers are not restored to their
	 * defaults. If this vgpu is then reused by the next VM, one of its
	 * pipes may still be enabled, so the vgpu will receive vblank
	 * interrupt requests as soon as it becomes active. msi_trigger,
	 * however, stays NULL until the guest enables MSI; so while
	 * msi_trigger is NULL, return success without injecting an
	 * interrupt into the guest.
	 */
	if (vgpu->vdev.msi_trigger == NULL)
		return 0;

	if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1)
		return 0;

	return -EFAULT;
}

static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
{
	struct kvmgt_guest_info *info;
	kvm_pfn_t pfn;

	if (!handle_valid(handle))
		return INTEL_GVT_INVALID_ADDR;

	info = (struct kvmgt_guest_info *)handle;

	pfn = gfn_to_pfn(info->kvm, gfn);
	if (is_error_noslot_pfn(pfn))
		return INTEL_GVT_INVALID_ADDR;

	return pfn;
}
int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
		unsigned long size, dma_addr_t *dma_addr)
{
	struct kvmgt_guest_info *info;
	struct intel_vgpu *vgpu;
	struct gvt_dma *entry;
	int ret;

	if (!handle_valid(handle))
		return -EINVAL;

	info = (struct kvmgt_guest_info *)handle;
	vgpu = info->vgpu;

	mutex_lock(&info->vgpu->vdev.cache_lock);

	entry = __gvt_cache_find_gfn(info->vgpu, gfn);
	if (!entry) {
		ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
		if (ret)
			goto err_unlock;

		ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
		if (ret)
			goto err_unmap;
	} else {
		kref_get(&entry->ref);
		*dma_addr = entry->dma_addr;
	}

	mutex_unlock(&info->vgpu->vdev.cache_lock);
	return 0;

err_unmap:
	gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
err_unlock:
	mutex_unlock(&info->vgpu->vdev.cache_lock);
	return ret;
}

static void __gvt_dma_release(struct kref *ref)
{
	struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);

	gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr,
			   entry->size);
	__gvt_cache_remove_entry(entry->vgpu, entry);
}

void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr)
{
	struct kvmgt_guest_info *info;
	struct gvt_dma *entry;

	if (!handle_valid(handle))
		return;

	info = (struct kvmgt_guest_info *)handle;

	mutex_lock(&info->vgpu->vdev.cache_lock);
	entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
	if (entry)
		kref_put(&entry->ref, __gvt_dma_release);
	mutex_unlock(&info->vgpu->vdev.cache_lock);
}
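/*
 * Copy to or from guest physical memory on behalf of device emulation,
 * using kvm_read_guest()/kvm_write_guest() under the kvm->srcu read lock.
 */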
static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
		void *buf, unsigned long len, bool write)
{
	struct kvmgt_guest_info *info;

	bool kthread = current->mm == NULL;

	if (!handle_valid(handle))

	info = (struct kvmgt_guest_info *)handle;

	idx = srcu_read_lock(&kvm->srcu);
	ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
		      kvm_read_guest(kvm, gpa, buf, len);
	srcu_read_unlock(&kvm->srcu, idx);

static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
		void *buf, unsigned long len)
{
	return kvmgt_rw_gpa(handle, gpa, buf, len, false);
}

static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa,
		void *buf, unsigned long len)
{
	return kvmgt_rw_gpa(handle, gpa, buf, len, true);
}

static unsigned long kvmgt_virt_to_pfn(void *addr)
{
	return PFN_DOWN(__pa(addr));
}

static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
{
	struct kvmgt_guest_info *info;

	if (!handle_valid(handle))

	info = (struct kvmgt_guest_info *)handle;

	return kvm_is_visible_gfn(kvm, gfn);
}
struct intel_gvt_mpt kvmgt_mpt = {
	.host_init = kvmgt_host_init,
	.host_exit = kvmgt_host_exit,
	.attach_vgpu = kvmgt_attach_vgpu,
	.detach_vgpu = kvmgt_detach_vgpu,
	.inject_msi = kvmgt_inject_msi,
	.from_virt_to_mfn = kvmgt_virt_to_pfn,
	.enable_page_track = kvmgt_page_track_add,
	.disable_page_track = kvmgt_page_track_remove,
	.read_gpa = kvmgt_read_gpa,
	.write_gpa = kvmgt_write_gpa,
	.gfn_to_mfn = kvmgt_gfn_to_pfn,
	.dma_map_guest_page = kvmgt_dma_map_guest_page,
	.dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
	.set_opregion = kvmgt_set_opregion,
	.get_vfio_device = kvmgt_get_vfio_device,
	.put_vfio_device = kvmgt_put_vfio_device,
	.is_valid_gfn = kvmgt_is_valid_gfn,
};
EXPORT_SYMBOL_GPL(kvmgt_mpt);
static int __init kvmgt_init(void)
{
	return 0;
}

static void __exit kvmgt_exit(void)
{
}

module_init(kvmgt_init);
module_exit(kvmgt_exit);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Intel Corporation");