/*
 * KVMGT - the implementation of Intel mediated pass-through framework for KVM
 *
 * Copyright(c) 2014-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Jike Song <jike.song@intel.com>
 *    Xiaoguang Chen <xiaoguang.chen@intel.com>
 */

#include <linux/init.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/uuid.h>
#include <linux/kvm_host.h>
#include <linux/vfio.h>
#include <linux/mdev.h>
#include <linux/debugfs.h>

#include "i915_drv.h"
#include "gvt.h"

static const struct intel_gvt_ops *intel_gvt_ops;

/* helper macros copied from vfio-pci */
#define VFIO_PCI_OFFSET_SHIFT   40
#define VFIO_PCI_OFFSET_TO_INDEX(off)   (off >> VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK    (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
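/*
 * VFIO encodes the region index in the upper bits of the file offset:
 * for example, an access to region 2 at offset 0x100 uses
 * ppos = (2ULL << 40) | 0x100, so VFIO_PCI_OFFSET_TO_INDEX() recovers
 * the index and (ppos & VFIO_PCI_OFFSET_MASK) the offset within it.
 */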

#define OPREGION_SIGNATURE "IntelGraphicsMem"

struct vfio_region;
struct intel_vgpu_regops {
        size_t (*rw)(struct intel_vgpu *vgpu, char *buf,
                        size_t count, loff_t *ppos, bool iswrite);
        void (*release)(struct intel_vgpu *vgpu,
                        struct vfio_region *region);
};

struct vfio_region {
        u32                             type;
        u32                             subtype;
        size_t                          size;
        u32                             flags;
        const struct intel_vgpu_regops  *ops;
        void                            *data;
};

struct kvmgt_pgfn {
        gfn_t gfn;
        struct hlist_node hnode;
};

struct kvmgt_guest_info {
        struct kvm *kvm;
        struct intel_vgpu *vgpu;
        struct kvm_page_track_notifier_node track_node;
#define NR_BKT (1 << 18)
        struct hlist_head ptable[NR_BKT];
#undef NR_BKT
        struct dentry *debugfs_cache_entries;
};

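/*
 * A pinned and DMA-mapped range of guest memory. Each entry is indexed
 * twice: by gfn (gfn_cache) for map requests and by dma_addr
 * (dma_addr_cache) for unmap requests; ref counts the users of the
 * mapping so repeated pins of the same gfn can share one entry.
 */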
struct gvt_dma {
        struct intel_vgpu *vgpu;
        struct rb_node gfn_node;
        struct rb_node dma_addr_node;
        gfn_t gfn;
        dma_addr_t dma_addr;
        unsigned long size;
        struct kref ref;
};

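/*
 * vgpu->handle holds a pointer to the kvmgt_guest_info once a guest is
 * attached; any value that fits in the low byte is treated as "no
 * guest", so only bits above 0xff count as a valid handle.
 */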
static inline bool handle_valid(unsigned long handle)
{
        return !!(handle & ~0xff);
}

static int kvmgt_guest_init(struct mdev_device *mdev);
static void intel_vgpu_release_work(struct work_struct *work);
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);

static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
                unsigned long size)
{
        int total_pages;
        int npage;
        int ret;

        total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;

        for (npage = 0; npage < total_pages; npage++) {
                unsigned long cur_gfn = gfn + npage;

                ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1);
                WARN_ON(ret != 1);
        }
}

/* Pin a normal or compound guest page for DMA. */
static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
                unsigned long size, struct page **page)
{
        unsigned long base_pfn = 0;
        int total_pages;
        int npage;
        int ret;

        total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
        /*
         * We pin the pages one-by-one to avoid allocating a big array
         * on stack to hold pfns.
         */
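        /*
         * Error accounting: the pfn_valid and contiguity checks below
         * fire after the current page has already been pinned, so they
         * bump npage before bailing out; the err path then unpins
         * exactly the pages that were successfully pinned.
         */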
        for (npage = 0; npage < total_pages; npage++) {
                unsigned long cur_gfn = gfn + npage;
                unsigned long pfn;

                ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1,
                                     IOMMU_READ | IOMMU_WRITE, &pfn);
                if (ret != 1) {
                        gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
                                     cur_gfn, ret);
                        goto err;
                }

                if (!pfn_valid(pfn)) {
                        gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
                        npage++;
                        ret = -EFAULT;
                        goto err;
                }

                if (npage == 0)
                        base_pfn = pfn;
                else if (base_pfn + npage != pfn) {
                        gvt_vgpu_err("The pages are not contiguous\n");
                        ret = -EINVAL;
                        npage++;
                        goto err;
                }
        }

        *page = pfn_to_page(base_pfn);
        return 0;
err:
        gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
        return ret;
}

static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
                dma_addr_t *dma_addr, unsigned long size)
{
        struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
        struct page *page = NULL;
        int ret;

        ret = gvt_pin_guest_page(vgpu, gfn, size, &page);
        if (ret)
                return ret;

        /* Setup DMA mapping. */
        *dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
        ret = dma_mapping_error(dev, *dma_addr);
        if (ret) {
                gvt_vgpu_err("DMA mapping failed for pfn 0x%lx, ret %d\n",
                             page_to_pfn(page), ret);
                gvt_unpin_guest_page(vgpu, gfn, size);
        }

        return ret;
}

static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
                dma_addr_t dma_addr, unsigned long size)
{
        struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;

        dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
        gvt_unpin_guest_page(vgpu, gfn, size);
}

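/*
 * The rb-tree lookup/insert/remove helpers below do no locking of
 * their own; callers serialize on vgpu->vdev.cache_lock (see the VFIO
 * DMA-unmap notifier and gvt_cache_destroy()).
 */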
static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
                dma_addr_t dma_addr)
{
        struct rb_node *node = vgpu->vdev.dma_addr_cache.rb_node;
        struct gvt_dma *itr;

        while (node) {
                itr = rb_entry(node, struct gvt_dma, dma_addr_node);

                if (dma_addr < itr->dma_addr)
                        node = node->rb_left;
                else if (dma_addr > itr->dma_addr)
                        node = node->rb_right;
                else
                        return itr;
        }
        return NULL;
}

static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
{
        struct rb_node *node = vgpu->vdev.gfn_cache.rb_node;
        struct gvt_dma *itr;

        while (node) {
                itr = rb_entry(node, struct gvt_dma, gfn_node);

                if (gfn < itr->gfn)
                        node = node->rb_left;
                else if (gfn > itr->gfn)
                        node = node->rb_right;
                else
                        return itr;
        }
        return NULL;
}

static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
                dma_addr_t dma_addr, unsigned long size)
{
        struct gvt_dma *new, *itr;
        struct rb_node **link, *parent = NULL;

        new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        new->vgpu = vgpu;
        new->gfn = gfn;
        new->dma_addr = dma_addr;
        new->size = size;
        kref_init(&new->ref);

        /* gfn_cache maps gfn to struct gvt_dma. */
        link = &vgpu->vdev.gfn_cache.rb_node;
        while (*link) {
                parent = *link;
                itr = rb_entry(parent, struct gvt_dma, gfn_node);

                if (gfn < itr->gfn)
                        link = &parent->rb_left;
                else
                        link = &parent->rb_right;
        }
        rb_link_node(&new->gfn_node, parent, link);
        rb_insert_color(&new->gfn_node, &vgpu->vdev.gfn_cache);

        /* dma_addr_cache maps dma addr to struct gvt_dma. */
        parent = NULL;
        link = &vgpu->vdev.dma_addr_cache.rb_node;
        while (*link) {
                parent = *link;
                itr = rb_entry(parent, struct gvt_dma, dma_addr_node);

                if (dma_addr < itr->dma_addr)
                        link = &parent->rb_left;
                else
                        link = &parent->rb_right;
        }
        rb_link_node(&new->dma_addr_node, parent, link);
        rb_insert_color(&new->dma_addr_node, &vgpu->vdev.dma_addr_cache);

        vgpu->vdev.nr_cache_entries++;
        return 0;
}

static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
                                struct gvt_dma *entry)
{
        rb_erase(&entry->gfn_node, &vgpu->vdev.gfn_cache);
        rb_erase(&entry->dma_addr_node, &vgpu->vdev.dma_addr_cache);
        kfree(entry);
        vgpu->vdev.nr_cache_entries--;
}

static void gvt_cache_destroy(struct intel_vgpu *vgpu)
{
        struct gvt_dma *dma;
        struct rb_node *node = NULL;

        for (;;) {
                mutex_lock(&vgpu->vdev.cache_lock);
                node = rb_first(&vgpu->vdev.gfn_cache);
                if (!node) {
                        mutex_unlock(&vgpu->vdev.cache_lock);
                        break;
                }
                dma = rb_entry(node, struct gvt_dma, gfn_node);
                gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
                __gvt_cache_remove_entry(vgpu, dma);
                mutex_unlock(&vgpu->vdev.cache_lock);
        }
}

static void gvt_cache_init(struct intel_vgpu *vgpu)
{
        vgpu->vdev.gfn_cache = RB_ROOT;
        vgpu->vdev.dma_addr_cache = RB_ROOT;
        vgpu->vdev.nr_cache_entries = 0;
        mutex_init(&vgpu->vdev.cache_lock);
}

static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
{
        hash_init(info->ptable);
}

static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info)
{
        struct kvmgt_pgfn *p;
        struct hlist_node *tmp;
        int i;

        hash_for_each_safe(info->ptable, i, tmp, p, hnode) {
                hash_del(&p->hnode);
                kfree(p);
        }
}

static struct kvmgt_pgfn *
__kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
{
        struct kvmgt_pgfn *p, *res = NULL;

        hash_for_each_possible(info->ptable, p, hnode, gfn) {
                if (gfn == p->gfn) {
                        res = p;
                        break;
                }
        }

        return res;
}

static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info,
                                gfn_t gfn)
{
        struct kvmgt_pgfn *p;

        p = __kvmgt_protect_table_find(info, gfn);
        return !!p;
}

static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
{
        struct kvmgt_pgfn *p;

        if (kvmgt_gfn_is_write_protected(info, gfn))
                return;

        p = kzalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
        if (WARN(!p, "gfn: 0x%llx\n", gfn))
                return;

        p->gfn = gfn;
        hash_add(info->ptable, &p->hnode, gfn);
}

static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
                                gfn_t gfn)
{
        struct kvmgt_pgfn *p;

        p = __kvmgt_protect_table_find(info, gfn);
        if (p) {
                hash_del(&p->hnode);
                kfree(p);
        }
}

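/*
 * Device-specific regions are numbered after the fixed VFIO PCI
 * regions, so the index into vdev.region[] is the VFIO region index
 * minus VFIO_PCI_NUM_REGIONS. The OpRegion region is read-only.
 */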
static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
                size_t count, loff_t *ppos, bool iswrite)
{
        unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
                        VFIO_PCI_NUM_REGIONS;
        void *base = vgpu->vdev.region[i].data;
        loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;

        if (pos >= vgpu->vdev.region[i].size || iswrite) {
                gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
                return -EINVAL;
        }
        count = min(count, (size_t)(vgpu->vdev.region[i].size - pos));
        memcpy(buf, base + pos, count);

        return count;
}

static void intel_vgpu_reg_release_opregion(struct intel_vgpu *vgpu,
                struct vfio_region *region)
{
}

static const struct intel_vgpu_regops intel_vgpu_regops_opregion = {
        .rw = intel_vgpu_reg_rw_opregion,
        .release = intel_vgpu_reg_release_opregion,
};

static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
                unsigned int type, unsigned int subtype,
                const struct intel_vgpu_regops *ops,
                size_t size, u32 flags, void *data)
{
        struct vfio_region *region;

        region = krealloc(vgpu->vdev.region,
                        (vgpu->vdev.num_regions + 1) * sizeof(*region),
                        GFP_KERNEL);
        if (!region)
                return -ENOMEM;

        vgpu->vdev.region = region;
        vgpu->vdev.region[vgpu->vdev.num_regions].type = type;
        vgpu->vdev.region[vgpu->vdev.num_regions].subtype = subtype;
        vgpu->vdev.region[vgpu->vdev.num_regions].ops = ops;
        vgpu->vdev.region[vgpu->vdev.num_regions].size = size;
        vgpu->vdev.region[vgpu->vdev.num_regions].flags = flags;
        vgpu->vdev.region[vgpu->vdev.num_regions].data = data;
        vgpu->vdev.num_regions++;
        return 0;
}

static int kvmgt_get_vfio_device(void *p_vgpu)
{
        struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;

        vgpu->vdev.vfio_device = vfio_device_get_from_dev(
                mdev_dev(vgpu->vdev.mdev));
        if (!vgpu->vdev.vfio_device) {
                gvt_vgpu_err("failed to get vfio device\n");
                return -ENODEV;
        }
        return 0;
}

static int kvmgt_set_opregion(void *p_vgpu)
{
        struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
        void *base;
        int ret;

        /* Each vgpu has its own opregion, although VFIO will create another
         * one later. This one is used to expose the opregion to VFIO, while
         * the one VFIO creates later is what the guest actually uses.
         */
        base = vgpu_opregion(vgpu)->va;
        if (!base)
                return -ENOMEM;

        if (memcmp(base, OPREGION_SIGNATURE, 16)) {
                memunmap(base);
                return -EINVAL;
        }

        ret = intel_vgpu_register_reg(vgpu,
                        PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
                        VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
                        &intel_vgpu_regops_opregion, OPREGION_SIZE,
                        VFIO_REGION_INFO_FLAG_READ, base);

        return ret;
}

static void kvmgt_put_vfio_device(void *vgpu)
{
        if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device))
                return;

        vfio_device_put(((struct intel_vgpu *)vgpu)->vdev.vfio_device);
}

static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
{
        struct intel_vgpu *vgpu = NULL;
        struct intel_vgpu_type *type;
        struct device *pdev;
        void *gvt;
        int ret;

        pdev = mdev_parent_dev(mdev);
        gvt = kdev_to_i915(pdev)->gvt;

        type = intel_gvt_ops->gvt_find_vgpu_type(gvt, kobject_name(kobj));
        if (!type) {
                gvt_vgpu_err("failed to find type %s to create\n",
                                                kobject_name(kobj));
                ret = -EINVAL;
                goto out;
        }

        vgpu = intel_gvt_ops->vgpu_create(gvt, type);
        if (IS_ERR_OR_NULL(vgpu)) {
                ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
                gvt_err("failed to create intel vgpu: %d\n", ret);
                goto out;
        }

        INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);

        vgpu->vdev.mdev = mdev;
        mdev_set_drvdata(mdev, vgpu);

        gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
                     dev_name(mdev_dev(mdev)));
        ret = 0;

out:
        return ret;
}

static int intel_vgpu_remove(struct mdev_device *mdev)
{
        struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

        if (handle_valid(vgpu->handle))
                return -EBUSY;

        intel_gvt_ops->vgpu_destroy(vgpu);
        return 0;
}

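/*
 * When userspace or the guest unmaps an IOVA range, the VFIO IOMMU
 * notifier below evicts every cached pin that falls inside it so the
 * backing pages are truly released; entry->size lets a single entry
 * cover a multi-page (e.g. 2MB huge page) pin.
 */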
static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
                                     unsigned long action, void *data)
{
        struct intel_vgpu *vgpu = container_of(nb,
                                        struct intel_vgpu,
                                        vdev.iommu_notifier);

        if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
                struct vfio_iommu_type1_dma_unmap *unmap = data;
                struct gvt_dma *entry;
                unsigned long iov_pfn, end_iov_pfn;

                iov_pfn = unmap->iova >> PAGE_SHIFT;
                end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE;

                mutex_lock(&vgpu->vdev.cache_lock);
                for (; iov_pfn < end_iov_pfn; iov_pfn++) {
                        entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
                        if (!entry)
                                continue;

                        gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
                                           entry->size);
                        __gvt_cache_remove_entry(vgpu, entry);
                }
                mutex_unlock(&vgpu->vdev.cache_lock);
        }

        return NOTIFY_OK;
}

static int intel_vgpu_group_notifier(struct notifier_block *nb,
                                     unsigned long action, void *data)
{
        struct intel_vgpu *vgpu = container_of(nb,
                                        struct intel_vgpu,
                                        vdev.group_notifier);

        /* the only action we care about */
        if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
                vgpu->vdev.kvm = data;

                if (!data)
                        schedule_work(&vgpu->vdev.release_work);
        }

        return NOTIFY_OK;
}

static int intel_vgpu_open(struct mdev_device *mdev)
{
        struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
        unsigned long events;
        int ret;

        vgpu->vdev.iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
        vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier;

        events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
        ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
                                &vgpu->vdev.iommu_notifier);
        if (ret != 0) {
                gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
                        ret);
                goto out;
        }

        events = VFIO_GROUP_NOTIFY_SET_KVM;
        ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
                                &vgpu->vdev.group_notifier);
        if (ret != 0) {
                gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
                        ret);
                goto undo_iommu;
        }

        ret = kvmgt_guest_init(mdev);
        if (ret)
                goto undo_group;

        intel_gvt_ops->vgpu_activate(vgpu);

        atomic_set(&vgpu->vdev.released, 0);
        return ret;

undo_group:
        vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
                                        &vgpu->vdev.group_notifier);

undo_iommu:
        vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
                                        &vgpu->vdev.iommu_notifier);
out:
        return ret;
}

static void __intel_vgpu_release(struct intel_vgpu *vgpu)
{
        struct kvmgt_guest_info *info;
        int ret;

        if (!handle_valid(vgpu->handle))
                return;

        if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
                return;

        intel_gvt_ops->vgpu_deactivate(vgpu);

        ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
                                        &vgpu->vdev.iommu_notifier);
        WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);

        ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY,
                                        &vgpu->vdev.group_notifier);
        WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);

        info = (struct kvmgt_guest_info *)vgpu->handle;
        kvmgt_guest_exit(info);

        vgpu->vdev.kvm = NULL;
        vgpu->handle = 0;
}

static void intel_vgpu_release(struct mdev_device *mdev)
{
        struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

        __intel_vgpu_release(vgpu);
}

static void intel_vgpu_release_work(struct work_struct *work)
{
        struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
                                        vdev.release_work);

        __intel_vgpu_release(vgpu);
}

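/*
 * Reassemble the guest-programmed BAR base from virtual config space:
 * the low dword is masked to strip the type bits, and for a 64-bit BAR
 * the following dword supplies bits 63:32.
 */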
static uint64_t intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
{
        u32 start_lo, start_hi;
        u32 mem_type;

        start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
                        PCI_BASE_ADDRESS_MEM_MASK;
        mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
                        PCI_BASE_ADDRESS_MEM_TYPE_MASK;

        switch (mem_type) {
        case PCI_BASE_ADDRESS_MEM_TYPE_64:
                start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
                                                + bar + 4));
                break;
        case PCI_BASE_ADDRESS_MEM_TYPE_32:
        case PCI_BASE_ADDRESS_MEM_TYPE_1M:
                /* 1M mem BAR treated as 32-bit BAR */
        default:
                /* mem unknown type treated as 32-bit BAR */
                start_hi = 0;
                break;
        }

        return ((u64)start_hi << 32) | start_lo;
}

static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
                             void *buf, unsigned int count, bool is_write)
{
        uint64_t bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
        int ret;

        if (is_write)
                ret = intel_gvt_ops->emulate_mmio_write(vgpu,
                                        bar_start + off, buf, count);
        else
                ret = intel_gvt_ops->emulate_mmio_read(vgpu,
                                        bar_start + off, buf, count);
        return ret;
}

static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, uint64_t off)
{
        return off >= vgpu_aperture_offset(vgpu) &&
               off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
}

static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t off,
                void *buf, unsigned long count, bool is_write)
{
        void *aperture_va;

        if (!intel_vgpu_in_aperture(vgpu, off) ||
            !intel_vgpu_in_aperture(vgpu, off + count)) {
                gvt_vgpu_err("Invalid aperture offset %llu\n", off);
                return -EINVAL;
        }

        aperture_va = io_mapping_map_wc(&vgpu->gvt->dev_priv->ggtt.iomap,
                                        ALIGN_DOWN(off, PAGE_SIZE),
                                        count + offset_in_page(off));
        if (!aperture_va)
                return -EIO;

        if (is_write)
                memcpy(aperture_va + offset_in_page(off), buf, count);
        else
                memcpy(buf, aperture_va + offset_in_page(off), count);

        io_mapping_unmap(aperture_va);

        return 0;
}

static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
                        size_t count, loff_t *ppos, bool is_write)
{
        struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
        unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
        uint64_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
        int ret = -EINVAL;

        if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions) {
                gvt_vgpu_err("invalid index: %u\n", index);
                return -EINVAL;
        }

        switch (index) {
        case VFIO_PCI_CONFIG_REGION_INDEX:
                if (is_write)
                        ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos,
                                                buf, count);
                else
                        ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos,
                                                buf, count);
                break;
        case VFIO_PCI_BAR0_REGION_INDEX:
                ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_0, pos,
                                        buf, count, is_write);
                break;
        case VFIO_PCI_BAR2_REGION_INDEX:
                ret = intel_vgpu_aperture_rw(vgpu, pos, buf, count, is_write);
                break;
        case VFIO_PCI_BAR1_REGION_INDEX:
        case VFIO_PCI_BAR3_REGION_INDEX:
        case VFIO_PCI_BAR4_REGION_INDEX:
        case VFIO_PCI_BAR5_REGION_INDEX:
        case VFIO_PCI_VGA_REGION_INDEX:
        case VFIO_PCI_ROM_REGION_INDEX:
                break;
        default:
                if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions)
                        return -EINVAL;

                index -= VFIO_PCI_NUM_REGIONS;
                return vgpu->vdev.region[index].ops->rw(vgpu, buf, count,
                                ppos, is_write);
        }

        return ret == 0 ? count : ret;
}

static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos)
{
        struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
        unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
        struct intel_gvt *gvt = vgpu->gvt;
        int offset;

        /*
         * Only allow MMIO GGTT entry access. Note that index here is a
         * VFIO region index, not a config-space offset, so it must be
         * compared against the BAR0 region index rather than
         * PCI_BASE_ADDRESS_0.
         */
        if (index != VFIO_PCI_BAR0_REGION_INDEX)
                return false;

        offset = (u64)(*ppos & VFIO_PCI_OFFSET_MASK) -
                intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);

        return offset >= gvt->device_info.gtt_start_offset &&
               offset < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt);
}

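/*
 * GGTT PTEs are 64 bits wide and guests may access them with single
 * 8-byte operations, so the read/write paths below pass such accesses
 * through intact instead of splitting them into two 4-byte MMIO ops.
 */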
static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
                        size_t count, loff_t *ppos)
{
        unsigned int done = 0;
        int ret;

        while (count) {
                size_t filled;

                /* Only support 8-byte GGTT entry reads */
                if (count >= 8 && !(*ppos % 8) &&
                        gtt_entry(mdev, ppos)) {
                        u64 val;

                        ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
                                        ppos, false);
                        if (ret <= 0)
                                goto read_err;

                        if (copy_to_user(buf, &val, sizeof(val)))
                                goto read_err;

                        filled = 8;
                } else if (count >= 4 && !(*ppos % 4)) {
                        u32 val;

                        ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
                                        ppos, false);
                        if (ret <= 0)
                                goto read_err;

                        if (copy_to_user(buf, &val, sizeof(val)))
                                goto read_err;

                        filled = 4;
                } else if (count >= 2 && !(*ppos % 2)) {
                        u16 val;

                        ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
                                        ppos, false);
                        if (ret <= 0)
                                goto read_err;

                        if (copy_to_user(buf, &val, sizeof(val)))
                                goto read_err;

                        filled = 2;
                } else {
                        u8 val;

                        ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos,
                                        false);
                        if (ret <= 0)
                                goto read_err;

                        if (copy_to_user(buf, &val, sizeof(val)))
                                goto read_err;

                        filled = 1;
                }

                count -= filled;
                done += filled;
                *ppos += filled;
                buf += filled;
        }

        return done;

read_err:
        return -EFAULT;
}

static ssize_t intel_vgpu_write(struct mdev_device *mdev,
                                const char __user *buf,
                                size_t count, loff_t *ppos)
{
        unsigned int done = 0;
        int ret;

        while (count) {
                size_t filled;

                /* Only support 8-byte GGTT entry writes */
                if (count >= 8 && !(*ppos % 8) &&
                        gtt_entry(mdev, ppos)) {
                        u64 val;

                        if (copy_from_user(&val, buf, sizeof(val)))
                                goto write_err;

                        ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
                                        ppos, true);
                        if (ret <= 0)
                                goto write_err;

                        filled = 8;
                } else if (count >= 4 && !(*ppos % 4)) {
                        u32 val;

                        if (copy_from_user(&val, buf, sizeof(val)))
                                goto write_err;

                        ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
                                        ppos, true);
                        if (ret <= 0)
                                goto write_err;

                        filled = 4;
                } else if (count >= 2 && !(*ppos % 2)) {
                        u16 val;

                        if (copy_from_user(&val, buf, sizeof(val)))
                                goto write_err;

                        ret = intel_vgpu_rw(mdev, (char *)&val,
                                        sizeof(val), ppos, true);
                        if (ret <= 0)
                                goto write_err;

                        filled = 2;
                } else {
                        u8 val;

                        if (copy_from_user(&val, buf, sizeof(val)))
                                goto write_err;

                        ret = intel_vgpu_rw(mdev, &val, sizeof(val),
                                        ppos, true);
                        if (ret <= 0)
                                goto write_err;

                        filled = 1;
                }

                count -= filled;
                done += filled;
                *ppos += filled;
                buf += filled;
        }

        return done;
write_err:
        return -EFAULT;
}

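/*
 * Only BAR2 (the aperture) may be mmap'ed, matching the sparse-mmap
 * capability reported by VFIO_DEVICE_GET_REGION_INFO below; the vma
 * page offset is replaced by the host physical base of this vgpu's
 * aperture slice before remapping.
 */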
static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
{
        unsigned int index;
        u64 virtaddr;
        unsigned long req_size, pgoff = 0;
        pgprot_t pg_prot;
        struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

        index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
        if (index >= VFIO_PCI_ROM_REGION_INDEX)
                return -EINVAL;

        if (vma->vm_end < vma->vm_start)
                return -EINVAL;
        if ((vma->vm_flags & VM_SHARED) == 0)
                return -EINVAL;
        if (index != VFIO_PCI_BAR2_REGION_INDEX)
                return -EINVAL;

        pg_prot = vma->vm_page_prot;
        virtaddr = vma->vm_start;
        req_size = vma->vm_end - vma->vm_start;
        pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;

        return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
}

static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
{
        if (type == VFIO_PCI_INTX_IRQ_INDEX || type == VFIO_PCI_MSI_IRQ_INDEX)
                return 1;

        return 0;
}

static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
                        unsigned int index, unsigned int start,
                        unsigned int count, uint32_t flags,
                        void *data)
{
        return 0;
}

static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
                        unsigned int index, unsigned int start,
                        unsigned int count, uint32_t flags, void *data)
{
        return 0;
}

static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
                unsigned int index, unsigned int start, unsigned int count,
                uint32_t flags, void *data)
{
        return 0;
}

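/*
 * The eventfd saved as msi_trigger below is what kvmgt signals later
 * (in its MSI injection path) to deliver a virtual MSI to the guest.
 */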
static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
                unsigned int index, unsigned int start, unsigned int count,
                uint32_t flags, void *data)
{
        struct eventfd_ctx *trigger;

        if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
                int fd = *(int *)data;

                trigger = eventfd_ctx_fdget(fd);
                if (IS_ERR(trigger)) {
                        gvt_vgpu_err("eventfd_ctx_fdget failed\n");
                        return PTR_ERR(trigger);
                }
                vgpu->vdev.msi_trigger = trigger;
        }

        return 0;
}

static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, uint32_t flags,
                unsigned int index, unsigned int start, unsigned int count,
                void *data)
{
        int (*func)(struct intel_vgpu *vgpu, unsigned int index,
                        unsigned int start, unsigned int count, uint32_t flags,
                        void *data) = NULL;

        switch (index) {
        case VFIO_PCI_INTX_IRQ_INDEX:
                switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
                case VFIO_IRQ_SET_ACTION_MASK:
                        func = intel_vgpu_set_intx_mask;
                        break;
                case VFIO_IRQ_SET_ACTION_UNMASK:
                        func = intel_vgpu_set_intx_unmask;
                        break;
                case VFIO_IRQ_SET_ACTION_TRIGGER:
                        func = intel_vgpu_set_intx_trigger;
                        break;
                }
                break;
        case VFIO_PCI_MSI_IRQ_INDEX:
                switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
                case VFIO_IRQ_SET_ACTION_MASK:
                case VFIO_IRQ_SET_ACTION_UNMASK:
                        /* XXX Need masking support exported */
                        break;
                case VFIO_IRQ_SET_ACTION_TRIGGER:
                        func = intel_vgpu_set_msi_trigger;
                        break;
                }
                break;
        }

        if (!func)
                return -ENOTTY;

        return func(vgpu, index, start, count, flags, data);
}

static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
                             unsigned long arg)
{
        struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
        unsigned long minsz;

        gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);

        if (cmd == VFIO_DEVICE_GET_INFO) {
                struct vfio_device_info info;

                minsz = offsetofend(struct vfio_device_info, num_irqs);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                info.flags = VFIO_DEVICE_FLAGS_PCI;
                info.flags |= VFIO_DEVICE_FLAGS_RESET;
                info.num_regions = VFIO_PCI_NUM_REGIONS +
                                vgpu->vdev.num_regions;
                info.num_irqs = VFIO_PCI_NUM_IRQS;

                return copy_to_user((void __user *)arg, &info, minsz) ?
                        -EFAULT : 0;

        } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
                struct vfio_region_info info;
                struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
                int i, ret;
                struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
                size_t size;
                int nr_areas = 1;
                int cap_type_id;

                minsz = offsetofend(struct vfio_region_info, offset);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                switch (info.index) {
                case VFIO_PCI_CONFIG_REGION_INDEX:
                        info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
                        info.size = vgpu->gvt->device_info.cfg_space_size;
                        info.flags = VFIO_REGION_INFO_FLAG_READ |
                                     VFIO_REGION_INFO_FLAG_WRITE;
                        break;
                case VFIO_PCI_BAR0_REGION_INDEX:
                        info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
                        info.size = vgpu->cfg_space.bar[info.index].size;
                        if (!info.size) {
                                info.flags = 0;
                                break;
                        }

                        info.flags = VFIO_REGION_INFO_FLAG_READ |
                                     VFIO_REGION_INFO_FLAG_WRITE;
                        break;
                case VFIO_PCI_BAR1_REGION_INDEX:
                        info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
                        info.size = 0;
                        info.flags = 0;
                        break;
                case VFIO_PCI_BAR2_REGION_INDEX:
                        info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
                        info.flags = VFIO_REGION_INFO_FLAG_CAPS |
                                        VFIO_REGION_INFO_FLAG_MMAP |
                                        VFIO_REGION_INFO_FLAG_READ |
                                        VFIO_REGION_INFO_FLAG_WRITE;
                        info.size = gvt_aperture_sz(vgpu->gvt);

                        size = sizeof(*sparse) +
                                        (nr_areas * sizeof(*sparse->areas));
                        sparse = kzalloc(size, GFP_KERNEL);
                        if (!sparse)
                                return -ENOMEM;

                        sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
                        sparse->header.version = 1;
                        sparse->nr_areas = nr_areas;
                        cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
                        sparse->areas[0].offset =
                                        PAGE_ALIGN(vgpu_aperture_offset(vgpu));
                        sparse->areas[0].size = vgpu_aperture_sz(vgpu);
                        break;

                case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
                        info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
                        info.size = 0;
                        info.flags = 0;

                        gvt_dbg_core("get region info bar:%d\n", info.index);
                        break;

                case VFIO_PCI_ROM_REGION_INDEX:
                case VFIO_PCI_VGA_REGION_INDEX:
                        info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
                        info.size = 0;
                        info.flags = 0;

                        gvt_dbg_core("get region info index:%d\n", info.index);
                        break;
                default:
                        {
                                struct vfio_region_info_cap_type cap_type = {
                                        .header.id = VFIO_REGION_INFO_CAP_TYPE,
                                        .header.version = 1 };

                                if (info.index >= VFIO_PCI_NUM_REGIONS +
                                                vgpu->vdev.num_regions)
                                        return -EINVAL;

                                i = info.index - VFIO_PCI_NUM_REGIONS;

                                info.offset =
                                        VFIO_PCI_INDEX_TO_OFFSET(info.index);
                                info.size = vgpu->vdev.region[i].size;
                                info.flags = vgpu->vdev.region[i].flags;

                                cap_type.type = vgpu->vdev.region[i].type;
                                cap_type.subtype = vgpu->vdev.region[i].subtype;

                                ret = vfio_info_add_capability(&caps,
                                                        &cap_type.header,
                                                        sizeof(cap_type));
                                if (ret)
                                        return ret;
                        }
                }

                if ((info.flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) {
                        switch (cap_type_id) {
                        case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
                                ret = vfio_info_add_capability(&caps,
                                        &sparse->header, sizeof(*sparse) +
                                        (sparse->nr_areas *
                                                sizeof(*sparse->areas)));
                                kfree(sparse);
                                if (ret)
                                        return ret;
                                break;
                        default:
                                return -EINVAL;
                        }
                }

                if (caps.size) {
                        info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
                        if (info.argsz < sizeof(info) + caps.size) {
                                info.argsz = sizeof(info) + caps.size;
                                info.cap_offset = 0;
                        } else {
                                vfio_info_cap_shift(&caps, sizeof(info));
                                if (copy_to_user((void __user *)arg +
                                                  sizeof(info), caps.buf,
                                                  caps.size)) {
                                        kfree(caps.buf);
                                        return -EFAULT;
                                }
                                info.cap_offset = sizeof(info);
                        }

                        kfree(caps.buf);
                }

                return copy_to_user((void __user *)arg, &info, minsz) ?
                        -EFAULT : 0;
        } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
                struct vfio_irq_info info;

                minsz = offsetofend(struct vfio_irq_info, count);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
                        return -EINVAL;

                switch (info.index) {
                case VFIO_PCI_INTX_IRQ_INDEX:
                case VFIO_PCI_MSI_IRQ_INDEX:
                        break;
                default:
                        return -EINVAL;
                }

                info.flags = VFIO_IRQ_INFO_EVENTFD;

                info.count = intel_vgpu_get_irq_count(vgpu, info.index);

                if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
                        info.flags |= (VFIO_IRQ_INFO_MASKABLE |
                                       VFIO_IRQ_INFO_AUTOMASKED);
                else
                        info.flags |= VFIO_IRQ_INFO_NORESIZE;

                return copy_to_user((void __user *)arg, &info, minsz) ?
                        -EFAULT : 0;
        } else if (cmd == VFIO_DEVICE_SET_IRQS) {
                struct vfio_irq_set hdr;
                u8 *data = NULL;
                int ret = 0;
                size_t data_size = 0;

                minsz = offsetofend(struct vfio_irq_set, count);

                if (copy_from_user(&hdr, (void __user *)arg, minsz))
                        return -EFAULT;

                if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
                        int max = intel_vgpu_get_irq_count(vgpu, hdr.index);

                        ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
                                                VFIO_PCI_NUM_IRQS, &data_size);
                        if (ret) {
                                gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
                                return -EINVAL;
                        }
                        if (data_size) {
                                data = memdup_user((void __user *)(arg + minsz),
                                                   data_size);
                                if (IS_ERR(data))
                                        return PTR_ERR(data);
                        }
                }

                ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index,
                                        hdr.start, hdr.count, data);
                kfree(data);

                return ret;
        } else if (cmd == VFIO_DEVICE_RESET) {
                intel_gvt_ops->vgpu_reset(vgpu);
                return 0;
        } else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) {
                struct vfio_device_gfx_plane_info dmabuf;
                int ret = 0;

                minsz = offsetofend(struct vfio_device_gfx_plane_info,
                                    dmabuf_id);
                if (copy_from_user(&dmabuf, (void __user *)arg, minsz))
                        return -EFAULT;
                if (dmabuf.argsz < minsz)
                        return -EINVAL;

                ret = intel_gvt_ops->vgpu_query_plane(vgpu, &dmabuf);
                if (ret != 0)
                        return ret;

                return copy_to_user((void __user *)arg, &dmabuf, minsz) ?
                                                                -EFAULT : 0;
        } else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) {
                __u32 dmabuf_id;
                __s32 dmabuf_fd;

                if (get_user(dmabuf_id, (__u32 __user *)arg))
                        return -EFAULT;

                dmabuf_fd = intel_gvt_ops->vgpu_get_dmabuf(vgpu, dmabuf_id);
                return dmabuf_fd;

        }

        return -ENOTTY;
}

static ssize_t
vgpu_id_show(struct device *dev, struct device_attribute *attr,
             char *buf)
{
        struct mdev_device *mdev = mdev_from_dev(dev);

        if (mdev) {
                struct intel_vgpu *vgpu = (struct intel_vgpu *)
                        mdev_get_drvdata(mdev);
                return sprintf(buf, "%d\n", vgpu->id);
        }
        return sprintf(buf, "\n");
}

static ssize_t
hw_id_show(struct device *dev, struct device_attribute *attr,
           char *buf)
{
        struct mdev_device *mdev = mdev_from_dev(dev);

        if (mdev) {
                struct intel_vgpu *vgpu = (struct intel_vgpu *)
                        mdev_get_drvdata(mdev);
                return sprintf(buf, "%u\n",
                               vgpu->submission.shadow_ctx->hw_id);
        }
        return sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(vgpu_id);
static DEVICE_ATTR_RO(hw_id);

static struct attribute *intel_vgpu_attrs[] = {
        &dev_attr_vgpu_id.attr,
        &dev_attr_hw_id.attr,
        NULL
};

static const struct attribute_group intel_vgpu_group = {
        .name = "intel_vgpu",
        .attrs = intel_vgpu_attrs,
};

static const struct attribute_group *intel_vgpu_groups[] = {
        &intel_vgpu_group,
        NULL,
};

static struct mdev_parent_ops intel_vgpu_ops = {
        .mdev_attr_groups       = intel_vgpu_groups,
        .create                 = intel_vgpu_create,
        .remove                 = intel_vgpu_remove,

        .open                   = intel_vgpu_open,
        .release                = intel_vgpu_release,

        .read                   = intel_vgpu_read,
        .write                  = intel_vgpu_write,
        .mmap                   = intel_vgpu_mmap,
        .ioctl                  = intel_vgpu_ioctl,
};

static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
{
        struct attribute **kvm_type_attrs;
        struct attribute_group **kvm_vgpu_type_groups;

        intel_gvt_ops = ops;
        if (!intel_gvt_ops->get_gvt_attrs(&kvm_type_attrs,
                        &kvm_vgpu_type_groups))
                return -EFAULT;
        intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups;

        return mdev_register_device(dev, &intel_vgpu_ops);
}

static void kvmgt_host_exit(struct device *dev, void *gvt)
{
        mdev_unregister_device(dev);
}

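/*
 * KVM page-track glue: GVT-g write-protects the guest pages that hold
 * GTT page tables so every guest update traps into the shadow
 * page-table code; the ptable hash mirrors which gfns are currently
 * protected so add/remove stay idempotent.
 */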
1449 static int kvmgt_page_track_add(unsigned long handle, u64 gfn)
1450 {
1451         struct kvmgt_guest_info *info;
1452         struct kvm *kvm;
1453         struct kvm_memory_slot *slot;
1454         int idx;
1455
1456         if (!handle_valid(handle))
1457                 return -ESRCH;
1458
1459         info = (struct kvmgt_guest_info *)handle;
1460         kvm = info->kvm;
1461
1462         idx = srcu_read_lock(&kvm->srcu);
1463         slot = gfn_to_memslot(kvm, gfn);
1464         if (!slot) {
1465                 srcu_read_unlock(&kvm->srcu, idx);
1466                 return -EINVAL;
1467         }
1468
1469         spin_lock(&kvm->mmu_lock);
1470
1471         if (kvmgt_gfn_is_write_protected(info, gfn))
1472                 goto out;
1473
1474         kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
1475         kvmgt_protect_table_add(info, gfn);
1476
1477 out:
1478         spin_unlock(&kvm->mmu_lock);
1479         srcu_read_unlock(&kvm->srcu, idx);
1480         return 0;
1481 }
1482
static int kvmgt_page_track_remove(unsigned long handle, u64 gfn)
{
        struct kvmgt_guest_info *info;
        struct kvm *kvm;
        struct kvm_memory_slot *slot;
        int idx;

        if (!handle_valid(handle))
                return 0;

        info = (struct kvmgt_guest_info *)handle;
        kvm = info->kvm;

        idx = srcu_read_lock(&kvm->srcu);
        slot = gfn_to_memslot(kvm, gfn);
        if (!slot) {
                srcu_read_unlock(&kvm->srcu, idx);
                return -EINVAL;
        }

        spin_lock(&kvm->mmu_lock);

        if (!kvmgt_gfn_is_write_protected(info, gfn))
                goto out;

        kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
        kvmgt_protect_table_del(info, gfn);

out:
        spin_unlock(&kvm->mmu_lock);
        srcu_read_unlock(&kvm->srcu, idx);
        return 0;
}

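/*
 * KVM page-track notifier callbacks. track_write fires on an emulated
 * guest write to a write-protected gpa; the payload is handed to the
 * GVT write-protect handler so the shadow structures (e.g. shadow GTT
 * entries) can be kept in sync. track_flush_slot drops all tracking
 * for a memslot that is about to go away.
 */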
static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                const u8 *val, int len,
                struct kvm_page_track_notifier_node *node)
{
        struct kvmgt_guest_info *info = container_of(node,
                                        struct kvmgt_guest_info, track_node);

        if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
                intel_gvt_ops->write_protect_handler(info->vgpu, gpa,
                                                     (void *)val, len);
}

static void kvmgt_page_track_flush_slot(struct kvm *kvm,
                struct kvm_memory_slot *slot,
                struct kvm_page_track_notifier_node *node)
{
        int i;
        gfn_t gfn;
        struct kvmgt_guest_info *info = container_of(node,
                                        struct kvmgt_guest_info, track_node);

        spin_lock(&kvm->mmu_lock);
        for (i = 0; i < slot->npages; i++) {
                gfn = slot->base_gfn + i;
                if (kvmgt_gfn_is_write_protected(info, gfn)) {
                        kvm_slot_page_track_remove_page(kvm, slot, gfn,
                                                KVM_PAGE_TRACK_WRITE);
                        kvmgt_protect_table_del(info, gfn);
                }
        }
        spin_unlock(&kvm->mmu_lock);
}

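/*
 * A KVM instance may back at most one vGPU handle at a time; this scan
 * rejects a second bind of the same VM before kvmgt_guest_init() sets
 * up a new handle for it.
 */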
static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
{
        struct intel_vgpu *itr;
        struct kvmgt_guest_info *info;
        int id;
        bool ret = false;

        mutex_lock(&vgpu->gvt->lock);
        for_each_active_vgpu(vgpu->gvt, itr, id) {
                if (!handle_valid(itr->handle))
                        continue;

                info = (struct kvmgt_guest_info *)itr->handle;
                if (kvm && kvm == info->kvm) {
                        ret = true;
                        goto out;
                }
        }
out:
        mutex_unlock(&vgpu->gvt->lock);
        return ret;
}

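/*
 * Binds a created mdev to its KVM instance: vgpu->handle becomes a
 * pointer to the kvmgt_guest_info, which every MPT entry point in this
 * file validates with handle_valid() before dereferencing. A reference
 * on the kvm is held for the lifetime of the handle, and the
 * page-track notifier is registered here so write-protection works as
 * soon as the guest runs.
 */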
static int kvmgt_guest_init(struct mdev_device *mdev)
{
        struct kvmgt_guest_info *info;
        struct intel_vgpu *vgpu;
        struct kvm *kvm;

        vgpu = mdev_get_drvdata(mdev);
        if (handle_valid(vgpu->handle))
                return -EEXIST;

        kvm = vgpu->vdev.kvm;
        if (!kvm || kvm->mm != current->mm) {
                gvt_vgpu_err("KVM is required to use Intel vGPU\n");
                return -ESRCH;
        }

        if (__kvmgt_vgpu_exist(vgpu, kvm))
                return -EEXIST;

        info = vzalloc(sizeof(struct kvmgt_guest_info));
        if (!info)
                return -ENOMEM;

        vgpu->handle = (unsigned long)info;
        info->vgpu = vgpu;
        info->kvm = kvm;
        kvm_get_kvm(info->kvm);

        kvmgt_protect_table_init(info);
        gvt_cache_init(vgpu);

        mutex_init(&vgpu->dmabuf_lock);
        init_completion(&vgpu->vblank_done);

        info->track_node.track_write = kvmgt_page_track_write;
        info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
        kvm_page_track_register_notifier(kvm, &info->track_node);

        info->debugfs_cache_entries = debugfs_create_ulong(
                                                "kvmgt_nr_cache_entries",
                                                0444, vgpu->debugfs,
                                                &vgpu->vdev.nr_cache_entries);
        if (!info->debugfs_cache_entries)
                gvt_vgpu_err("Cannot create kvmgt debugfs entry\n");

        return 0;
}

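/*
 * Teardown mirrors kvmgt_guest_init() in reverse: debugfs node,
 * page-track notifier, kvm reference, protect table, DMA cache,
 * then the info structure itself.
 */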
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
{
        debugfs_remove(info->debugfs_cache_entries);

        kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
        kvm_put_kvm(info->kvm);
        kvmgt_protect_table_destroy(info);
        gvt_cache_destroy(info->vgpu);
        vfree(info);

        return true;
}

static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
{
        /* nothing to do here */
        return 0;
}

static void kvmgt_detach_vgpu(unsigned long handle)
{
        /* nothing to do here */
}

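/*
 * MSI delivery: the guest's MSI is raised by signalling the eventfd
 * that userspace (e.g. QEMU, via VFIO) wired up as the MSI trigger,
 * so no dedicated injection hypercall is needed. eventfd_signal()
 * returns the count actually added, hence the "== 1" success check.
 */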
static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
{
        struct kvmgt_guest_info *info;
        struct intel_vgpu *vgpu;

        if (!handle_valid(handle))
                return -ESRCH;

        info = (struct kvmgt_guest_info *)handle;
        vgpu = info->vgpu;

        if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1)
                return 0;

        return -EFAULT;
}

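/*
 * Plain gfn->pfn lookup backing the .gfn_to_mfn hook; unlike the
 * DMA-map path below it does not enter the per-vGPU cache. Callers
 * must treat INTEL_GVT_INVALID_ADDR as failure.
 */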
static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
{
        struct kvmgt_guest_info *info;
        kvm_pfn_t pfn;

        if (!handle_valid(handle))
                return INTEL_GVT_INVALID_ADDR;

        info = (struct kvmgt_guest_info *)handle;

        pfn = gfn_to_pfn(info->kvm, gfn);
        if (is_error_noslot_pfn(pfn))
                return INTEL_GVT_INVALID_ADDR;

        return pfn;
}

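/*
 * DMA mapping with a per-vGPU cache: the first map of a gfn pins and
 * maps the page and inserts a refcounted gvt_dma entry; later maps of
 * the same gfn just take a kref and return the cached dma_addr. Each
 * successful map must eventually be balanced by an unmap with the
 * returned dma_addr. Hedged pairing sketch (wrapper names assumed
 * from gvt/mpt.h):
 *
 *	ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
 *						      size, &dma_addr);
 *	...
 *	intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr);
 */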
int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
                unsigned long size, dma_addr_t *dma_addr)
{
        struct kvmgt_guest_info *info;
        struct intel_vgpu *vgpu;
        struct gvt_dma *entry;
        int ret;

        if (!handle_valid(handle))
                return -EINVAL;

        info = (struct kvmgt_guest_info *)handle;
        vgpu = info->vgpu;

        mutex_lock(&info->vgpu->vdev.cache_lock);

        entry = __gvt_cache_find_gfn(info->vgpu, gfn);
        if (!entry) {
                ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
                if (ret)
                        goto err_unlock;

                ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
                if (ret)
                        goto err_unmap;
        } else {
                kref_get(&entry->ref);
                *dma_addr = entry->dma_addr;
        }

        mutex_unlock(&info->vgpu->vdev.cache_lock);
        return 0;

err_unmap:
        gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
err_unlock:
        mutex_unlock(&info->vgpu->vdev.cache_lock);
        return ret;
}

static void __gvt_dma_release(struct kref *ref)
{
        struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);

        gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr,
                           entry->size);
        __gvt_cache_remove_entry(entry->vgpu, entry);
}

void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr)
{
        struct kvmgt_guest_info *info;
        struct gvt_dma *entry;

        if (!handle_valid(handle))
                return;

        info = (struct kvmgt_guest_info *)handle;

        mutex_lock(&info->vgpu->vdev.cache_lock);
        entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
        if (entry)
                kref_put(&entry->ref, __gvt_dma_release);
        mutex_unlock(&info->vgpu->vdev.cache_lock);
}

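/*
 * GPA read/write on behalf of the device model. kvm_read_guest() and
 * kvm_write_guest() copy through the VM's user mappings, which need a
 * valid current->mm; when called from a kernel thread (current->mm ==
 * NULL) the VM's mm is temporarily adopted with use_mm().
 */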
static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
                        void *buf, unsigned long len, bool write)
{
        struct kvmgt_guest_info *info;
        struct kvm *kvm;
        int idx, ret;
        bool kthread = current->mm == NULL;

        if (!handle_valid(handle))
                return -ESRCH;

        info = (struct kvmgt_guest_info *)handle;
        kvm = info->kvm;

        if (kthread)
                use_mm(kvm->mm);

        idx = srcu_read_lock(&kvm->srcu);
        ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
                      kvm_read_guest(kvm, gpa, buf, len);
        srcu_read_unlock(&kvm->srcu, idx);

        if (kthread)
                unuse_mm(kvm->mm);

        return ret;
}

static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
                        void *buf, unsigned long len)
{
        return kvmgt_rw_gpa(handle, gpa, buf, len, false);
}

static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa,
                        void *buf, unsigned long len)
{
        return kvmgt_rw_gpa(handle, gpa, buf, len, true);
}

static unsigned long kvmgt_virt_to_pfn(void *addr)
{
        return PFN_DOWN(__pa(addr));
}

static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
{
        struct kvmgt_guest_info *info;
        struct kvm *kvm;

        if (!handle_valid(handle))
                return false;

        info = (struct kvmgt_guest_info *)handle;
        kvm = info->kvm;

        return kvm_is_visible_gfn(kvm, gfn);
}

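/*
 * The MPT (mediated pass-through) dispatch table exported to the GVT
 * core. GVT code never calls the kvmgt_* functions directly; it goes
 * through intel_gvt_hypervisor_*() wrappers that dispatch via
 * intel_gvt_host.mpt. Hedged sketch of the wrapper shape (written in
 * the style of gvt/mpt.h, not copied from it):
 *
 *	static inline int intel_gvt_hypervisor_read_gpa(
 *			struct intel_vgpu *vgpu, unsigned long gpa,
 *			void *buf, unsigned long len)
 *	{
 *		return intel_gvt_host.mpt->read_gpa(vgpu->handle, gpa,
 *						    buf, len);
 *	}
 */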
struct intel_gvt_mpt kvmgt_mpt = {
        .host_init = kvmgt_host_init,
        .host_exit = kvmgt_host_exit,
        .attach_vgpu = kvmgt_attach_vgpu,
        .detach_vgpu = kvmgt_detach_vgpu,
        .inject_msi = kvmgt_inject_msi,
        .from_virt_to_mfn = kvmgt_virt_to_pfn,
        .enable_page_track = kvmgt_page_track_add,
        .disable_page_track = kvmgt_page_track_remove,
        .read_gpa = kvmgt_read_gpa,
        .write_gpa = kvmgt_write_gpa,
        .gfn_to_mfn = kvmgt_gfn_to_pfn,
        .dma_map_guest_page = kvmgt_dma_map_guest_page,
        .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
        .set_opregion = kvmgt_set_opregion,
        .get_vfio_device = kvmgt_get_vfio_device,
        .put_vfio_device = kvmgt_put_vfio_device,
        .is_valid_gfn = kvmgt_is_valid_gfn,
};
EXPORT_SYMBOL_GPL(kvmgt_mpt);

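/*
 * Module init/exit are intentionally empty: all real setup happens in
 * kvmgt_host_init(), invoked by the GVT core through the exported
 * kvmgt_mpt table when i915/GVT initializes its hypervisor host side.
 */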
static int __init kvmgt_init(void)
{
        return 0;
}

static void __exit kvmgt_exit(void)
{
}

module_init(kvmgt_init);
module_exit(kvmgt_exit);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Intel Corporation");