	struct rb_node node;
	gfn_t gfn;
	unsigned long iova;
-	struct list_head list;
};
static inline bool handle_valid(unsigned long handle)
	new->gfn = gfn;
	new->iova = iova;
-	INIT_LIST_HEAD(&new->list);
	mutex_lock(&vgpu->vdev.cache_lock);
	while (*link) {
	kfree(entry);
}
-static void intel_vgpu_unpin_work(struct work_struct *work)
+static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
{
-	struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
-			vdev.unpin_work);
	struct device *dev = mdev_dev(vgpu->vdev.mdev);
	struct gvt_dma *this;
-	unsigned long gfn;
-
-	for (;;) {
-		spin_lock(&vgpu->vdev.unpin_lock);
-		if (list_empty(&vgpu->vdev.unpin_list)) {
-			spin_unlock(&vgpu->vdev.unpin_lock);
-			break;
-		}
-		this = list_first_entry(&vgpu->vdev.unpin_list,
-				struct gvt_dma, list);
-		list_del(&this->list);
-		spin_unlock(&vgpu->vdev.unpin_lock);
-
-		gfn = this->gfn;
-		vfio_unpin_pages(dev, &gfn, 1);
-		kfree(this);
-	}
-}
-
-static bool gvt_cache_mark_remove(struct intel_vgpu *vgpu, gfn_t gfn)
-{
-	struct gvt_dma *this;
+	unsigned long g1;
+	int rc;
	mutex_lock(&vgpu->vdev.cache_lock);
	this = __gvt_cache_find(vgpu, gfn);
	if (!this) {
		mutex_unlock(&vgpu->vdev.cache_lock);
-		return false;
+		return;
	}
+
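+	/* unmap the iova, unpin the page and drop the cache entry under cache_lock */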
+	g1 = gfn;
	gvt_dma_unmap_iova(vgpu, this->iova);
-	/* remove this from rb tree */
-	rb_erase(&this->node, &vgpu->vdev.cache);
+	rc = vfio_unpin_pages(dev, &g1, 1);
+	WARN_ON(rc != 1);
+	__gvt_cache_remove_entry(vgpu, this);
	mutex_unlock(&vgpu->vdev.cache_lock);
-
-	/* put this to the unpin_list */
-	spin_lock(&vgpu->vdev.unpin_lock);
-	list_move_tail(&this->list, &vgpu->vdev.unpin_list);
-	spin_unlock(&vgpu->vdev.unpin_lock);
-
-	return true;
}
static void gvt_cache_init(struct intel_vgpu *vgpu)
}
	INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
-	INIT_WORK(&vgpu->vdev.unpin_work, intel_vgpu_unpin_work);
-	spin_lock_init(&vgpu->vdev.unpin_lock);
-	INIT_LIST_HEAD(&vgpu->vdev.unpin_list);
	vgpu->vdev.mdev = mdev;
	mdev_set_drvdata(mdev, vgpu);
	struct intel_vgpu *vgpu = container_of(nb,
					struct intel_vgpu,
					vdev.iommu_notifier);
-	bool sched_unmap = false;
	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		struct vfio_iommu_type1_dma_unmap *unmap = data;
		end_gfn = gfn + unmap->size / PAGE_SIZE;
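+		/* unpin and drop the cache entry for every gfn in the unmapped range */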
		while (gfn < end_gfn)
-			sched_unmap |= gvt_cache_mark_remove(vgpu, gfn++);
-
-		if (sched_unmap)
-			schedule_work(&vgpu->vdev.unpin_work);
+			gvt_cache_remove(vgpu, gfn++);
	}
	return NOTIFY_OK;