drm/virtio: fence: pass plain pointer
author     Gerd Hoffmann <kraxel@redhat.com>    Wed, 28 Nov 2018 15:10:20 +0000 (16:10 +0100)
committer  Gerd Hoffmann <kraxel@redhat.com>    Thu, 29 Nov 2018 12:45:07 +0000 (13:45 +0100)
Since commit "9fdd90c0f4 drm/virtio: add virtio_gpu_alloc_fence()"
fences are no longer allocated by virtio_gpu_fence_emit().  So there
is no need to pass down a reference to the fence pointer; a plain
pointer is enough now.

Convert virtio_gpu_fence_emit() and callers.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Robert Foss <robert.foss@collabora.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20181128151021.29565-2-kraxel@redhat.com
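
For reference, the resulting calling convention looks roughly like the
sketch below.  This is an illustrative, hypothetical caller
(example_submit() is not part of the patch); virtio_gpu_fence_alloc(),
virtio_gpu_fence_emit() and dma_fence_put() are the functions the patch
actually touches or that its callers already use.

    /*
     * Hypothetical caller, for illustration only: the fence is allocated
     * up front and handed to virtio_gpu_fence_emit() as a plain pointer.
     */
    static int example_submit(struct virtio_gpu_device *vgdev,
                              struct virtio_gpu_ctrl_hdr *cmd_hdr)
    {
            struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);

            if (!fence)
                    return -ENOMEM;

            /* emit only takes its own reference; the caller keeps ownership */
            virtio_gpu_fence_emit(vgdev, cmd_hdr, fence);

            /* drop the caller's reference once it is no longer needed */
            dma_fence_put(&fence->f);
            return 0;
    }

Allocating the fence up front also lets error paths drop it with
virtio_gpu_fence_cleanup() before it has ever been emitted, as the
resource-create ioctl below does.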
drivers/gpu/drm/virtio/virtgpu_drv.h
drivers/gpu/drm/virtio/virtgpu_fence.c
drivers/gpu/drm/virtio/virtgpu_ioctl.c
drivers/gpu/drm/virtio/virtgpu_plane.c
drivers/gpu/drm/virtio/virtgpu_vq.c

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 7bec6e3..d6cc1a9 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -273,7 +273,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
                                        uint64_t offset,
                                        __le32 width, __le32 height,
                                        __le32 x, __le32 y,
-                                       struct virtio_gpu_fence **fence);
+                                       struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id,
                                   uint32_t x, uint32_t y,
@@ -284,7 +284,7 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
                                uint32_t x, uint32_t y);
 int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
                             struct virtio_gpu_object *obj,
-                            struct virtio_gpu_fence **fence);
+                            struct virtio_gpu_fence *fence);
 void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
                              struct virtio_gpu_object *obj);
 int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
@@ -309,23 +309,23 @@ void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t resource_id);
 void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
                           void *data, uint32_t data_size,
-                          uint32_t ctx_id, struct virtio_gpu_fence **fence);
+                          uint32_t ctx_id, struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
                                          uint32_t resource_id, uint32_t ctx_id,
                                          uint64_t offset, uint32_t level,
                                          struct virtio_gpu_box *box,
-                                         struct virtio_gpu_fence **fence);
+                                         struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
                                        struct virtio_gpu_object *bo,
                                        uint32_t ctx_id,
                                        uint64_t offset, uint32_t level,
                                        struct virtio_gpu_box *box,
-                                       struct virtio_gpu_fence **fence);
+                                       struct virtio_gpu_fence *fence);
 void
 virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_object *bo,
                                  struct virtio_gpu_resource_create_3d *rc_3d,
-                                 struct virtio_gpu_fence **fence);
+                                 struct virtio_gpu_fence *fence);
 void virtio_gpu_ctrl_ack(struct virtqueue *vq);
 void virtio_gpu_cursor_ack(struct virtqueue *vq);
 void virtio_gpu_fence_ack(struct virtqueue *vq);
@@ -358,7 +358,7 @@ struct virtio_gpu_fence *virtio_gpu_fence_alloc(
 void virtio_gpu_fence_cleanup(struct virtio_gpu_fence *fence);
 int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
                          struct virtio_gpu_ctrl_hdr *cmd_hdr,
-                         struct virtio_gpu_fence **fence);
+                         struct virtio_gpu_fence *fence);
 void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
                                    u64 last_seq);
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index 6b5d922..4d6826b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -91,19 +91,19 @@ void virtio_gpu_fence_cleanup(struct virtio_gpu_fence *fence)
 
 int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
                          struct virtio_gpu_ctrl_hdr *cmd_hdr,
-                         struct virtio_gpu_fence **fence)
+                         struct virtio_gpu_fence *fence)
 {
        struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
        unsigned long irq_flags;
 
        spin_lock_irqsave(&drv->lock, irq_flags);
-       (*fence)->seq = ++drv->sync_seq;
-       dma_fence_get(&(*fence)->f);
-       list_add_tail(&(*fence)->node, &drv->fences);
+       fence->seq = ++drv->sync_seq;
+       dma_fence_get(&fence->f);
+       list_add_tail(&fence->node, &drv->fences);
        spin_unlock_irqrestore(&drv->lock, irq_flags);
 
        cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
-       cmd_hdr->fence_id = cpu_to_le64((*fence)->seq);
+       cmd_hdr->fence_id = cpu_to_le64(fence->seq);
        return 0;
 }
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 340f251..e9cdb4c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -221,7 +221,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
        }
 
        virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
-                             vfpriv->ctx_id, &out_fence);
+                             vfpriv->ctx_id, out_fence);
 
        ttm_eu_fence_buffer_objects(&ticket, &validate_list, &out_fence->f);
 
@@ -349,7 +349,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
                }
 
                virtio_gpu_cmd_resource_create_3d(vgdev, qobj, &rc_3d, NULL);
-               ret = virtio_gpu_object_attach(vgdev, qobj, &fence);
+               ret = virtio_gpu_object_attach(vgdev, qobj, fence);
                if (ret) {
                        virtio_gpu_fence_cleanup(fence);
                        goto fail_backoff;
@@ -450,7 +450,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
        virtio_gpu_cmd_transfer_from_host_3d
                (vgdev, qobj->hw_res_handle,
                 vfpriv->ctx_id, offset, args->level,
-                &box, &fence);
+                &box, fence);
        reservation_object_add_excl_fence(qobj->tbo.resv,
                                          &fence->f);
 
@@ -504,7 +504,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
                virtio_gpu_cmd_transfer_to_host_3d
                        (vgdev, qobj,
                         vfpriv ? vfpriv->ctx_id : 0, offset,
-                        args->level, &box, &fence);
+                        args->level, &box, fence);
                reservation_object_add_excl_fence(qobj->tbo.resv,
                                                  &fence->f);
                dma_fence_put(&fence->f);
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index b84ac8c..ead5c53 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -204,7 +204,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
                        (vgdev, bo, 0,
                         cpu_to_le32(plane->state->crtc_w),
                         cpu_to_le32(plane->state->crtc_h),
-                        0, 0, &vgfb->fence);
+                        0, 0, vgfb->fence);
                ret = virtio_gpu_object_reserve(bo, false);
                if (!ret) {
                        reservation_object_add_excl_fence(bo->tbo.resv,
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 2c6764f..9703866 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -298,7 +298,7 @@ static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
 static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf,
                                               struct virtio_gpu_ctrl_hdr *hdr,
-                                              struct virtio_gpu_fence **fence)
+                                              struct virtio_gpu_fence *fence)
 {
        struct virtqueue *vq = vgdev->ctrlq.vq;
        int rc;
@@ -405,7 +405,7 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
 
 static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
                                                  uint32_t resource_id,
-                                                 struct virtio_gpu_fence **fence)
+                                                 struct virtio_gpu_fence *fence)
 {
        struct virtio_gpu_resource_detach_backing *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
@@ -467,7 +467,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
                                        uint64_t offset,
                                        __le32 width, __le32 height,
                                        __le32 x, __le32 y,
-                                       struct virtio_gpu_fence **fence)
+                                       struct virtio_gpu_fence *fence)
 {
        struct virtio_gpu_transfer_to_host_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
@@ -497,7 +497,7 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
                                       uint32_t resource_id,
                                       struct virtio_gpu_mem_entry *ents,
                                       uint32_t nents,
-                                      struct virtio_gpu_fence **fence)
+                                      struct virtio_gpu_fence *fence)
 {
        struct virtio_gpu_resource_attach_backing *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
@@ -821,7 +821,7 @@ void
 virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_object *bo,
                                  struct virtio_gpu_resource_create_3d *rc_3d,
-                                 struct virtio_gpu_fence **fence)
+                                 struct virtio_gpu_fence *fence)
 {
        struct virtio_gpu_resource_create_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
@@ -842,7 +842,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
                                        uint32_t ctx_id,
                                        uint64_t offset, uint32_t level,
                                        struct virtio_gpu_box *box,
-                                       struct virtio_gpu_fence **fence)
+                                       struct virtio_gpu_fence *fence)
 {
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
@@ -870,7 +870,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
                                          uint32_t resource_id, uint32_t ctx_id,
                                          uint64_t offset, uint32_t level,
                                          struct virtio_gpu_box *box,
-                                         struct virtio_gpu_fence **fence)
+                                         struct virtio_gpu_fence *fence)
 {
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
@@ -890,7 +890,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 
 void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
                           void *data, uint32_t data_size,
-                          uint32_t ctx_id, struct virtio_gpu_fence **fence)
+                          uint32_t ctx_id, struct virtio_gpu_fence *fence)
 {
        struct virtio_gpu_cmd_submit *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
@@ -910,7 +910,7 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 
 int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
                             struct virtio_gpu_object *obj,
-                            struct virtio_gpu_fence **fence)
+                            struct virtio_gpu_fence *fence)
 {
        bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
        struct virtio_gpu_mem_entry *ents;
@@ -967,7 +967,7 @@ void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
        if (use_dma_api && obj->mapped) {
                struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
                /* detach backing and wait for the host process it ... */
-               virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, &fence);
+               virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
                dma_fence_wait(&fence->f, true);
                dma_fence_put(&fence->f);