drm/virtio: implement context init: stop using drv->context when creating fence
authorGurchetan Singh <gurchetansingh@chromium.org>
Tue, 21 Sep 2021 23:20:20 +0000 (16:20 -0700)
committerGerd Hoffmann <kraxel@redhat.com>
Wed, 29 Sep 2021 07:22:31 +0000 (09:22 +0200)
The plumbing is all here to do this.  Since we always use the
default fence context when allocating a fence, this makes no
functional difference.
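
For reference, the context a fence is now initialized with is just
base_fence_ctx + ring_idx.  Below is a minimal sketch of that
arithmetic (plain C, hypothetical harness; only base_fence_ctx,
ring_idx and drv->context correspond to names in the driver):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* drv_context stands in for drv->context, the default timeline. */
	uint64_t drv_context = 1;

	/* Legacy callers pass the default context with ring_idx 0, so
	 * the derived context equals drv->context: no behavior change. */
	uint64_t base_fence_ctx = drv_context;
	uint32_t ring_idx = 0;

	printf("default: %llu\n",
	       (unsigned long long)(base_fence_ctx + ring_idx));

	/* A context-init caller gets one timeline per ring. */
	base_fence_ctx = 100;
	for (ring_idx = 0; ring_idx < 3; ring_idx++)
		printf("ring %u: %llu\n", ring_idx,
		       (unsigned long long)(base_fence_ctx + ring_idx));
	return 0;
}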

We can't process just the largest fence id anymore, since fence
ids are now associated with different timelines.  It's fine for
fence_id 260 to signal before 259.  As such, process each fence_id
individually.
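
A toy illustration of that ordering (plain C; struct resp and
process_fence() are hypothetical stand-ins for the control-queue
response and virtio_gpu_fence_event_process()):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct resp {
	uint64_t fence_id;
	uint32_t ring_idx;
};

/* Stand-in for virtio_gpu_fence_event_process(): each completed id
 * is handled on its own, so ordering across rings does not matter. */
static void process_fence(const struct resp *r)
{
	printf("signal fence %llu (ring %u)\n",
	       (unsigned long long)r->fence_id, r->ring_idx);
}

int main(void)
{
	/* fence_id 260 (ring 0) completes before 259 (ring 1); with
	 * per-ring timelines this ordering is perfectly legal. */
	struct resp completed[] = { { 260, 0 }, { 259, 1 } };
	size_t i;

	for (i = 0; i < sizeof(completed) / sizeof(completed[0]); i++)
		process_fence(&completed[i]);
	return 0;
}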

Signed-off-by: Gurchetan Singh <gurchetansingh@chromium.org>
Acked-by: Lingfeng Yang <lfy@google.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20210921232024.817-9-gurchetansingh@chromium.org
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
drivers/gpu/drm/virtio/virtgpu_fence.c
drivers/gpu/drm/virtio/virtgpu_vq.c

index 24c728b..98a00c1 100644 (file)
@@ -75,20 +75,25 @@ struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev,
                                                uint64_t base_fence_ctx,
                                                uint32_t ring_idx)
 {
+       uint64_t fence_context = base_fence_ctx + ring_idx;
        struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
        struct virtio_gpu_fence *fence = kzalloc(sizeof(struct virtio_gpu_fence),
                                                        GFP_KERNEL);
+
        if (!fence)
                return fence;
 
        fence->drv = drv;
+       fence->ring_idx = ring_idx;
+       fence->emit_fence_info = !(base_fence_ctx == drv->context);
 
        /* This only partially initializes the fence because the seqno is
         * unknown yet.  The fence must not be used outside of the driver
         * until virtio_gpu_fence_emit is called.
         */
-       dma_fence_init(&fence->f, &virtio_gpu_fence_ops, &drv->lock, drv->context,
-                      0);
+
+       dma_fence_init(&fence->f, &virtio_gpu_fence_ops, &drv->lock,
+                      fence_context, 0);
 
        return fence;
 }
@@ -110,6 +115,13 @@ void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
 
        cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
        cmd_hdr->fence_id = cpu_to_le64(fence->fence_id);
+
+       /* Only currently defined fence param. */
+       if (fence->emit_fence_info) {
+               cmd_hdr->flags |=
+                       cpu_to_le32(VIRTIO_GPU_FLAG_INFO_RING_IDX);
+               cmd_hdr->ring_idx = (u8)fence->ring_idx;
+       }
 }
 
 void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
index db77415..7c052ef 100644 (file)
@@ -199,7 +199,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;
        struct virtio_gpu_ctrl_hdr *resp;
-       u64 fence_id = 0;
+       u64 fence_id;
 
        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->ctrlq.qlock);
@@ -226,23 +226,14 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
                                DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
                }
                if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
-                       u64 f = le64_to_cpu(resp->fence_id);
-
-                       if (fence_id > f) {
-                               DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
-                                         __func__, fence_id, f);
-                       } else {
-                               fence_id = f;
-                       }
+                       fence_id = le64_to_cpu(resp->fence_id);
+                       virtio_gpu_fence_event_process(vgdev, fence_id);
                }
                if (entry->resp_cb)
                        entry->resp_cb(vgdev, entry);
        }
        wake_up(&vgdev->ctrlq.ack_queue);
 
-       if (fence_id)
-               virtio_gpu_fence_event_process(vgdev, fence_id);
-
        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                if (entry->objs)
                        virtio_gpu_array_put_free_delayed(vgdev, entry->objs);