drm/virtio: rework virtio_gpu_transfer_to_host_ioctl fencing
author Gerd Hoffmann <kraxel@redhat.com>
Thu, 29 Aug 2019 10:32:54 +0000 (12:32 +0200)
committer Gerd Hoffmann <kraxel@redhat.com>
Wed, 4 Sep 2019 04:54:10 +0000 (06:54 +0200)
Switch to the virtio_gpu_array_* helper workflow.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20190829103301.3539-12-kraxel@redhat.com
drivers/gpu/drm/virtio/virtgpu_drv.h
drivers/gpu/drm/virtio/virtgpu_ioctl.c
drivers/gpu/drm/virtio/virtgpu_plane.c
drivers/gpu/drm/virtio/virtgpu_vq.c

index fa568cb..4f54bf7 100644 (file)
@@ -279,10 +279,10 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
 void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id);
 void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
-                                       struct virtio_gpu_object *bo,
                                        uint64_t offset,
                                        __le32 width, __le32 height,
                                        __le32 x, __le32 y,
+                                       struct virtio_gpu_object_array *objs,
                                        struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id,
@@ -329,10 +329,10 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
                                          struct virtio_gpu_object_array *objs,
                                          struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
-                                       struct virtio_gpu_object *bo,
                                        uint32_t ctx_id,
                                        uint64_t offset, uint32_t level,
                                        struct virtio_gpu_box *box,
+                                       struct virtio_gpu_object_array *objs,
                                        struct virtio_gpu_fence *fence);
 void
 virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
index f2133d3..677d844 100644 (file)
@@ -383,52 +383,44 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct drm_virtgpu_3d_transfer_to_host *args = data;
-       struct ttm_operation_ctx ctx = { true, false };
-       struct drm_gem_object *gobj = NULL;
-       struct virtio_gpu_object *qobj = NULL;
+       struct virtio_gpu_object_array *objs;
        struct virtio_gpu_fence *fence;
        struct virtio_gpu_box box;
        int ret;
        u32 offset = args->offset;
 
-       gobj = drm_gem_object_lookup(file, args->bo_handle);
-       if (gobj == NULL)
+       objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
+       if (objs == NULL)
                return -ENOENT;
 
-       qobj = gem_to_virtio_gpu_obj(gobj);
-
-       ret = virtio_gpu_object_reserve(qobj);
-       if (ret)
-               goto out;
-
-       ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
-       if (unlikely(ret))
-               goto out_unres;
-
        convert_to_hw_box(&box, &args->box);
        if (!vgdev->has_virgl_3d) {
                virtio_gpu_cmd_transfer_to_host_2d
-                       (vgdev, qobj, offset,
-                        box.w, box.h, box.x, box.y, NULL);
+                       (vgdev, offset,
+                        box.w, box.h, box.x, box.y,
+                        objs, NULL);
        } else {
+               ret = virtio_gpu_array_lock_resv(objs);
+               if (ret != 0)
+                       goto err_put_free;
+
+               ret = -ENOMEM;
                fence = virtio_gpu_fence_alloc(vgdev);
-               if (!fence) {
-                       ret = -ENOMEM;
-                       goto out_unres;
-               }
+               if (!fence)
+                       goto err_unlock;
+
                virtio_gpu_cmd_transfer_to_host_3d
-                       (vgdev, qobj,
+                       (vgdev,
                         vfpriv ? vfpriv->ctx_id : 0, offset,
-                        args->level, &box, fence);
-               dma_resv_add_excl_fence(qobj->tbo.base.resv,
-                                                 &fence->f);
+                        args->level, &box, objs, fence);
                dma_fence_put(&fence->f);
        }
+       return 0;
 
-out_unres:
-       virtio_gpu_object_unreserve(qobj);
-out:
-       drm_gem_object_put_unlocked(gobj);
+err_unlock:
+       virtio_gpu_array_unlock_resv(objs);
+err_put_free:
+       virtio_gpu_array_put_free(objs);
        return ret;
 }
 
index f23622b..f4b7360 100644 (file)
@@ -124,12 +124,19 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
                bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
                handle = bo->hw_res_handle;
                if (bo->dumb) {
+                       struct virtio_gpu_object_array *objs;
+
+                       objs = virtio_gpu_array_alloc(1);
+                       if (!objs)
+                               return;
+                       virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
                        virtio_gpu_cmd_transfer_to_host_2d
-                               (vgdev, bo, 0,
+                               (vgdev, 0,
                                 cpu_to_le32(plane->state->src_w >> 16),
                                 cpu_to_le32(plane->state->src_h >> 16),
                                 cpu_to_le32(plane->state->src_x >> 16),
-                                cpu_to_le32(plane->state->src_y >> 16), NULL);
+                                cpu_to_le32(plane->state->src_y >> 16),
+                                objs, NULL);
                }
        } else {
                handle = 0;
@@ -219,11 +226,17 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 
        if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
                /* new cursor -- update & wait */
+               struct virtio_gpu_object_array *objs;
+
+               objs = virtio_gpu_array_alloc(1);
+               if (!objs)
+                       return;
+               virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
                virtio_gpu_cmd_transfer_to_host_2d
-                       (vgdev, bo, 0,
+                       (vgdev, 0,
                         cpu_to_le32(plane->state->crtc_w),
                         cpu_to_le32(plane->state->crtc_h),
-                        0, 0, vgfb->fence);
+                        0, 0, objs, vgfb->fence);
                dma_fence_wait(&vgfb->fence->f, true);
                dma_fence_put(&vgfb->fence->f);
                vgfb->fence = NULL;
index a8f1dbf..2071409 100644 (file)
@@ -491,12 +491,13 @@ void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
 }
 
 void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
-                                       struct virtio_gpu_object *bo,
                                        uint64_t offset,
                                        __le32 width, __le32 height,
                                        __le32 x, __le32 y,
+                                       struct virtio_gpu_object_array *objs,
                                        struct virtio_gpu_fence *fence)
 {
+       struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_transfer_to_host_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
@@ -508,6 +509,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
 
        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
+       vbuf->objs = objs;
 
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
@@ -900,12 +902,13 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 }
 
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
-                                       struct virtio_gpu_object *bo,
                                        uint32_t ctx_id,
                                        uint64_t offset, uint32_t level,
                                        struct virtio_gpu_box *box,
+                                       struct virtio_gpu_object_array *objs,
                                        struct virtio_gpu_fence *fence)
 {
+       struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
@@ -918,6 +921,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
 
+       vbuf->objs = objs;
+
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);