YaGL: Implemented multicore rendering and fences
author: Stanislav Vorobiov <s.vorobiov@samsung.com>
Thu, 28 Nov 2013 10:51:12 +0000 (14:51 +0400)
committer: SeokYeon Hwang <syeon.hwang@samsung.com>
Wed, 9 Apr 2014 05:42:28 +0000 (14:42 +0900)
We now use multicore rendering, i.e. we offload all
rendering to a separate thread and use fences to wait
until certain parts of it are complete. This patch
implements fences via TTM sync objects; it also uses
TTM execbuffer utils to fence buffers and TTM object
files to export fences to user space.

Change-Id: Ibed86c3161f3b7207725c8662ffa909d103acedf

23 files changed:
drivers/gpu/drm/vigs/Makefile
drivers/gpu/drm/vigs/vigs_comm.c
drivers/gpu/drm/vigs/vigs_comm.h
drivers/gpu/drm/vigs/vigs_crtc.c
drivers/gpu/drm/vigs/vigs_device.c
drivers/gpu/drm/vigs/vigs_device.h
drivers/gpu/drm/vigs/vigs_driver.c
drivers/gpu/drm/vigs/vigs_execbuffer.c
drivers/gpu/drm/vigs/vigs_execbuffer.h
drivers/gpu/drm/vigs/vigs_fence.c [new file with mode: 0644]
drivers/gpu/drm/vigs/vigs_fence.h [new file with mode: 0644]
drivers/gpu/drm/vigs/vigs_fenceman.c [new file with mode: 0644]
drivers/gpu/drm/vigs/vigs_fenceman.h [new file with mode: 0644]
drivers/gpu/drm/vigs/vigs_file.c [new file with mode: 0644]
drivers/gpu/drm/vigs/vigs_file.h [new file with mode: 0644]
drivers/gpu/drm/vigs/vigs_gem.c
drivers/gpu/drm/vigs/vigs_gem.h
drivers/gpu/drm/vigs/vigs_irq.c
drivers/gpu/drm/vigs/vigs_mman.c
drivers/gpu/drm/vigs/vigs_protocol.h
drivers/gpu/drm/vigs/vigs_regs.h
drivers/gpu/yagl/yagl_ioctl.h
include/drm/vigs_drm.h

index f3d0fb9fb483703e9e8096bb9070ef5fe8110bb0..04b4ad7f435dd61d98798a4c3975ca07a865d2be 100644 (file)
@@ -15,6 +15,9 @@ vigs_drm-y := main.o \
               vigs_framebuffer.o \
               vigs_comm.o \
               vigs_fbdev.o \
-              vigs_irq.o
+              vigs_irq.o \
+              vigs_fence.o \
+              vigs_fenceman.o \
+              vigs_file.o
 
 obj-$(CONFIG_DRM_VIGS) += vigs_drm.o
index 668026f6c8e3b5168a4c0dfb5aa7b778d90bd9a3..8f4981b4bd2b442d3593a3dac679df1a27291d47 100644 (file)
@@ -2,6 +2,7 @@
 #include "vigs_device.h"
 #include "vigs_execbuffer.h"
 #include "vigs_regs.h"
+#include "vigs_fence.h"
 #include <drm/vigs_drm.h>
 
 static int vigs_comm_alloc(struct vigs_comm *comm,
@@ -45,17 +46,13 @@ static int vigs_comm_alloc(struct vigs_comm *comm,
 
     *ptr = comm->execbuffer->gem.kptr;
 
-    memset(*ptr, 0, vigs_gem_size(&comm->execbuffer->gem));
-
     return 0;
 }
 
 static int vigs_comm_prepare(struct vigs_comm *comm,
                              vigsp_cmd cmd,
                              unsigned long request_size,
-                             unsigned long response_size,
-                             void **request,
-                             void **response)
+                             void **request)
 {
     int ret;
     void *ptr;
@@ -65,9 +62,7 @@ static int vigs_comm_prepare(struct vigs_comm *comm,
     ret = vigs_comm_alloc(comm,
                           sizeof(*batch_header) +
                           sizeof(*request_header) +
-                          request_size +
-                          sizeof(struct vigsp_cmd_response_header) +
-                          response_size,
+                          request_size,
                           &ptr);
 
     if (ret != 0) {
@@ -77,7 +72,8 @@ static int vigs_comm_prepare(struct vigs_comm *comm,
     batch_header = ptr;
     request_header = (struct vigsp_cmd_request_header*)(batch_header + 1);
 
-    batch_header->num_requests = 1;
+    batch_header->fence_seq = 0;
+    batch_header->size = sizeof(*request_header) + request_size;
 
     request_header->cmd = cmd;
     request_header->size = request_size;
@@ -86,83 +82,38 @@ static int vigs_comm_prepare(struct vigs_comm *comm,
         *request = (request_header + 1);
     }
 
-    if (response) {
-        *response = (void*)(request_header + 1) +
-                    request_size +
-                    sizeof(struct vigsp_cmd_response_header);
-    }
-
     return 0;
 }
 
-static void vigs_comm_exec_locked(struct vigs_comm *comm,
-                                  struct vigs_execbuffer *execbuffer)
+static void vigs_comm_exec_internal(struct vigs_comm *comm,
+                                    struct vigs_execbuffer *execbuffer)
 {
     writel(vigs_gem_offset(&execbuffer->gem), comm->io_ptr + VIGS_REG_EXEC);
 }
 
-static int vigs_comm_exec_internal(struct vigs_comm *comm)
-{
-    struct vigsp_cmd_batch_header *batch_header = comm->execbuffer->gem.kptr;
-    struct vigsp_cmd_request_header *request_header =
-        (struct vigsp_cmd_request_header*)(batch_header + 1);
-    struct vigsp_cmd_response_header *response_header;
-    vigsp_u32 i;
-
-    for (i = 0; i < batch_header->num_requests; ++i) {
-        request_header =
-            (struct vigsp_cmd_request_header*)((uint8_t*)(request_header + 1) +
-                                               request_header->size);
-    }
-
-    response_header = (struct vigsp_cmd_response_header*)request_header;
-
-    vigs_comm_exec_locked(comm, comm->execbuffer);
-
-    switch (response_header->status) {
-    case vigsp_status_success:
-        return 0;
-    case vigsp_status_bad_call:
-        DRM_ERROR("bad host call\n");
-        return -EINVAL;
-    case vigsp_status_exec_error:
-        DRM_ERROR("host exec error\n");
-        return -EIO;
-    default:
-        DRM_ERROR("fatal host error\n");
-        return -ENXIO;
-    }
-}
-
 static int vigs_comm_init(struct vigs_comm *comm)
 {
     int ret;
     struct vigsp_cmd_init_request *request;
-    struct vigsp_cmd_init_response *response;
 
     ret = vigs_comm_prepare(comm,
                             vigsp_cmd_init,
                             sizeof(*request),
-                            sizeof(*response),
-                            (void**)&request,
-                            (void**)&response);
+                            (void**)&request);
 
     if (ret != 0) {
         return ret;
     }
 
     request->client_version = VIGS_PROTOCOL_VERSION;
+    request->server_version = 0;
 
-    ret = vigs_comm_exec_internal(comm);
+    vigs_comm_exec_internal(comm, comm->execbuffer);
 
-    if (ret != 0) {
-        return ret;
-    }
-
-    if (response->server_version != VIGS_PROTOCOL_VERSION) {
+    if (request->server_version != VIGS_PROTOCOL_VERSION) {
         DRM_ERROR("protocol version mismatch, expected %u, actual %u\n",
                   VIGS_PROTOCOL_VERSION,
-                  response->server_version);
+                  request->server_version);
         return -ENODEV;
     }
 
@@ -173,13 +124,13 @@ static void vigs_comm_exit(struct vigs_comm *comm)
 {
     int ret;
 
-    ret = vigs_comm_prepare(comm, vigsp_cmd_exit, 0, 0, NULL, NULL);
+    ret = vigs_comm_prepare(comm, vigsp_cmd_exit, 0, NULL);
 
     if (ret != 0) {
         return;
     }
 
-    vigs_comm_exec_internal(comm);
+    vigs_comm_exec_internal(comm, comm->execbuffer);
 }
 
 int vigs_comm_create(struct vigs_device *vigs_dev,
@@ -235,9 +186,7 @@ void vigs_comm_destroy(struct vigs_comm *comm)
 void vigs_comm_exec(struct vigs_comm *comm,
                     struct vigs_execbuffer *execbuffer)
 {
-    mutex_lock(&comm->mutex);
-    vigs_comm_exec_locked(comm, execbuffer);
-    mutex_unlock(&comm->mutex);
+    vigs_comm_exec_internal(comm, execbuffer);
 }
 
 int vigs_comm_reset(struct vigs_comm *comm)
@@ -246,10 +195,10 @@ int vigs_comm_reset(struct vigs_comm *comm)
 
     mutex_lock(&comm->mutex);
 
-    ret = vigs_comm_prepare(comm, vigsp_cmd_reset, 0, 0, NULL, NULL);
+    ret = vigs_comm_prepare(comm, vigsp_cmd_reset, 0, NULL);
 
     if (ret == 0) {
-        ret = vigs_comm_exec_internal(comm);
+        vigs_comm_exec_internal(comm, comm->execbuffer);
     }
 
     mutex_unlock(&comm->mutex);
@@ -279,9 +228,7 @@ int vigs_comm_create_surface(struct vigs_comm *comm,
     ret = vigs_comm_prepare(comm,
                             vigsp_cmd_create_surface,
                             sizeof(*request),
-                            0,
-                            (void**)&request,
-                            NULL);
+                            (void**)&request);
 
     if (ret == 0) {
         request->width = width;
@@ -290,7 +237,7 @@ int vigs_comm_create_surface(struct vigs_comm *comm,
         request->format = format;
         request->id = id;
 
-        ret = vigs_comm_exec_internal(comm);
+        vigs_comm_exec_internal(comm, comm->execbuffer);
     }
 
     mutex_unlock(&comm->mutex);
@@ -310,14 +257,12 @@ int vigs_comm_destroy_surface(struct vigs_comm *comm, vigsp_surface_id id)
     ret = vigs_comm_prepare(comm,
                             vigsp_cmd_destroy_surface,
                             sizeof(*request),
-                            0,
-                            (void**)&request,
-                            NULL);
+                            (void**)&request);
 
     if (ret == 0) {
         request->id = id;
 
-        ret = vigs_comm_exec_internal(comm);
+        vigs_comm_exec_internal(comm, comm->execbuffer);
     }
 
     mutex_unlock(&comm->mutex);
@@ -330,28 +275,41 @@ int vigs_comm_set_root_surface(struct vigs_comm *comm,
                                vigsp_offset offset)
 {
     int ret;
+    struct vigs_fence *fence;
     struct vigsp_cmd_set_root_surface_request *request;
 
     DRM_DEBUG_DRIVER("id = %u, offset = %u\n", id, offset);
 
+    ret = vigs_fence_create(comm->vigs_dev->fenceman, &fence);
+
+    if (ret != 0) {
+        return ret;
+    }
+
     mutex_lock(&comm->mutex);
 
     ret = vigs_comm_prepare(comm,
                             vigsp_cmd_set_root_surface,
                             sizeof(*request),
-                            0,
-                            (void**)&request,
-                            NULL);
+                            (void**)&request);
 
     if (ret == 0) {
         request->id = id;
         request->offset = offset;
 
-        ret = vigs_comm_exec_internal(comm);
+        vigs_execbuffer_fence(comm->execbuffer, fence);
+
+        vigs_comm_exec_internal(comm, comm->execbuffer);
     }
 
     mutex_unlock(&comm->mutex);
 
+    if (ret == 0) {
+        vigs_fence_wait(fence, false);
+    }
+
+    vigs_fence_unref(fence);
+
     return ret;
 }
 
@@ -360,28 +318,41 @@ int vigs_comm_update_vram(struct vigs_comm *comm,
                           vigsp_offset offset)
 {
     int ret;
+    struct vigs_fence *fence;
     struct vigsp_cmd_update_vram_request *request;
 
     DRM_DEBUG_DRIVER("id = %u, offset = %u\n", id, offset);
 
+    ret = vigs_fence_create(comm->vigs_dev->fenceman, &fence);
+
+    if (ret != 0) {
+        return ret;
+    }
+
     mutex_lock(&comm->mutex);
 
     ret = vigs_comm_prepare(comm,
                             vigsp_cmd_update_vram,
                             sizeof(*request),
-                            0,
-                            (void**)&request,
-                            NULL);
+                            (void**)&request);
 
     if (ret == 0) {
         request->sfc_id = id;
         request->offset = offset;
 
-        ret = vigs_comm_exec_internal(comm);
+        vigs_execbuffer_fence(comm->execbuffer, fence);
+
+        vigs_comm_exec_internal(comm, comm->execbuffer);
     }
 
     mutex_unlock(&comm->mutex);
 
+    if (ret == 0) {
+        vigs_fence_wait(fence, false);
+    }
+
+    vigs_fence_unref(fence);
+
     return ret;
 }
 
@@ -392,18 +363,23 @@ int vigs_comm_update_gpu(struct vigs_comm *comm,
                          vigsp_offset offset)
 {
     int ret;
+    struct vigs_fence *fence;
     struct vigsp_cmd_update_gpu_request *request;
 
     DRM_DEBUG_DRIVER("id = %u, offset = %u\n", id, offset);
 
+    ret = vigs_fence_create(comm->vigs_dev->fenceman, &fence);
+
+    if (ret != 0) {
+        return ret;
+    }
+
     mutex_lock(&comm->mutex);
 
     ret = vigs_comm_prepare(comm,
                             vigsp_cmd_update_gpu,
                             sizeof(*request) + sizeof(struct vigsp_rect),
-                            0,
-                            (void**)&request,
-                            NULL);
+                            (void**)&request);
 
     if (ret == 0) {
         request->sfc_id = id;
@@ -414,14 +390,53 @@ int vigs_comm_update_gpu(struct vigs_comm *comm,
         request->entries[0].size.w = width;
         request->entries[0].size.h = height;
 
-        ret = vigs_comm_exec_internal(comm);
+        vigs_execbuffer_fence(comm->execbuffer, fence);
+
+        vigs_comm_exec_internal(comm, comm->execbuffer);
     }
 
     mutex_unlock(&comm->mutex);
 
+    if (ret == 0) {
+        vigs_fence_wait(fence, false);
+    }
+
+    vigs_fence_unref(fence);
+
     return ret;
 }
 
+int vigs_comm_fence(struct vigs_comm *comm, struct vigs_fence *fence)
+{
+    struct vigsp_cmd_batch_header *batch_header;
+    int ret;
+
+    DRM_DEBUG_DRIVER("seq = %u\n", fence->seq);
+
+    mutex_lock(&comm->mutex);
+
+    ret = vigs_comm_alloc(comm,
+                          sizeof(*batch_header),
+                          (void**)&batch_header);
+
+    if (ret != 0) {
+        mutex_unlock(&comm->mutex);
+
+        return ret;
+    }
+
+    batch_header->fence_seq = 0;
+    batch_header->size = 0;
+
+    vigs_execbuffer_fence(comm->execbuffer, fence);
+
+    vigs_comm_exec_internal(comm, comm->execbuffer);
+
+    mutex_unlock(&comm->mutex);
+
+    return 0;
+}
+
 int vigs_comm_get_protocol_version_ioctl(struct drm_device *drm_dev,
                                          void *data,
                                          struct drm_file *file_priv)
index 5372696fb3e0f78fcb5b6d79885477216a1fa0e9..4fa0925859371ac80bc1118ab06433d7edc62438 100644 (file)
@@ -9,6 +9,7 @@ struct drm_device;
 struct drm_file;
 struct vigs_device;
 struct vigs_execbuffer;
+struct vigs_fence;
 
 struct vigs_comm
 {
@@ -63,6 +64,8 @@ int vigs_comm_update_gpu(struct vigs_comm *comm,
                          u32 height,
                          vigsp_offset offset);
 
+int vigs_comm_fence(struct vigs_comm *comm, struct vigs_fence *fence);
+
 /*
  * IOCTLs
  * @{
index 72d0ad442c60fa28ea2c6568630aeb84a1abb41b..bed70d3c22c9e02607d43a2bbb381b9372f8ce7f 100644 (file)
@@ -64,6 +64,7 @@ retry:
 
     if (ret != 0) {
         vigs_framebuffer_unpin(vigs_fb);
+
         return ret;
     }
 
index e2642fec8cbde17d7b38926da1c899bedfa07ee0..0b30d0816605974baf8d6c081fef2779300047c8 100644 (file)
@@ -1,5 +1,6 @@
 #include "vigs_device.h"
 #include "vigs_mman.h"
+#include "vigs_fenceman.h"
 #include "vigs_crtc.h"
 #include "vigs_output.h"
 #include "vigs_framebuffer.h"
@@ -87,210 +88,6 @@ static struct vigs_mman_ops mman_ops =
     .cleanup_vma = &vigs_device_mman_cleanup_vma
 };
 
-static struct vigs_surface
-    *vigs_device_reference_surface_unlocked(struct vigs_device *vigs_dev,
-                                            vigsp_surface_id sfc_id)
-{
-    struct vigs_surface *sfc;
-
-    mutex_lock(&vigs_dev->drm_dev->struct_mutex);
-
-    mutex_lock(&vigs_dev->surface_idr_mutex);
-
-    sfc = idr_find(&vigs_dev->surface_idr, sfc_id);
-
-    if (sfc) {
-        if (vigs_gem_freed(&sfc->gem)) {
-            sfc = NULL;
-        } else {
-            drm_gem_object_reference(&sfc->gem.base);
-        }
-    }
-
-    mutex_unlock(&vigs_dev->surface_idr_mutex);
-
-    mutex_unlock(&vigs_dev->drm_dev->struct_mutex);
-
-    return sfc;
-}
-
-static bool vigs_gem_is_reserved(struct list_head *gem_list,
-                                 struct vigs_gem_object *gem)
-{
-    struct vigs_gem_object *tmp;
-
-    list_for_each_entry(tmp, gem_list, list) {
-        if (tmp == gem) {
-            return true;
-        }
-    }
-
-    return false;
-}
-
-static struct vigs_surface
-    *vigs_surface_reserve(struct vigs_device *vigs_dev,
-                          struct list_head *gem_list,
-                          vigsp_surface_id sfc_id)
-{
-    struct vigs_surface *sfc =
-        vigs_device_reference_surface_unlocked(vigs_dev, sfc_id);
-
-    if (!sfc) {
-        DRM_ERROR("Surface %u not found\n", sfc_id);
-        return NULL;
-    }
-
-    if (vigs_gem_is_reserved(gem_list, &sfc->gem)) {
-        drm_gem_object_unreference_unlocked(&sfc->gem.base);
-    } else {
-        vigs_gem_reserve(&sfc->gem);
-        list_add_tail(&sfc->gem.list, gem_list);
-    }
-
-    return sfc;
-}
-
-/*
- * 'gem_list' will hold a list of GEMs that should be
- * unreserved and unreferenced after execution.
- */
-static int vigs_device_patch_commands(struct vigs_device *vigs_dev,
-                                      void *data,
-                                      u32 data_size,
-                                      struct list_head* gem_list)
-{
-    struct vigsp_cmd_batch_header *batch_header = data;
-    struct vigsp_cmd_request_header *request_header =
-        (struct vigsp_cmd_request_header*)(batch_header + 1);
-    union
-    {
-        struct vigsp_cmd_update_vram_request *update_vram;
-        struct vigsp_cmd_update_gpu_request *update_gpu;
-        struct vigsp_cmd_copy_request *copy;
-        struct vigsp_cmd_solid_fill_request *solid_fill;
-        void *data;
-    } request;
-    vigsp_u32 i;
-    struct vigs_surface *sfc;
-    int ret = 0;
-
-    /*
-     * GEM is always at least PAGE_SIZE long, so don't check
-     * if batch header is out of bounds.
-     */
-
-    for (i = 0; i < batch_header->num_requests; ++i) {
-        if (((void*)(request_header) + sizeof(*request_header)) >
-            (data + data_size)) {
-            DRM_ERROR("request header outside of GEM\n");
-            ret = -EINVAL;
-            break;
-        }
-
-        if (((void*)(request_header + 1) + request_header->size) >
-            (data + data_size)) {
-            DRM_ERROR("request data outside of GEM\n");
-            ret = -EINVAL;
-            break;
-        }
-
-        request.data = (request_header + 1);
-
-        switch (request_header->cmd) {
-        case vigsp_cmd_update_vram:
-            sfc = vigs_surface_reserve(vigs_dev,
-                                       gem_list,
-                                       request.update_vram->sfc_id);
-            if (!sfc) {
-                ret = -EINVAL;
-                break;
-            }
-            if (vigs_gem_in_vram(&sfc->gem)) {
-                if (vigs_surface_need_vram_update(sfc)) {
-                    request.update_vram->offset = vigs_gem_offset(&sfc->gem);
-                    sfc->is_gpu_dirty = false;
-                } else {
-                    DRM_DEBUG_DRIVER("Surface %u doesn't need to be updated, ignoring update_vram\n",
-                                     request.update_vram->sfc_id);
-                    request.update_vram->sfc_id = 0;
-                }
-            } else {
-                DRM_DEBUG_DRIVER("Surface %u not in VRAM, ignoring update_vram\n",
-                                 request.update_vram->sfc_id);
-                request.update_vram->sfc_id = 0;
-            }
-            break;
-        case vigsp_cmd_update_gpu:
-            sfc = vigs_surface_reserve(vigs_dev,
-                                       gem_list,
-                                       request.update_gpu->sfc_id);
-            if (!sfc) {
-                ret = -EINVAL;
-                break;
-            }
-            if (vigs_gem_in_vram(&sfc->gem)) {
-                if (vigs_surface_need_gpu_update(sfc)) {
-                    request.update_gpu->offset = vigs_gem_offset(&sfc->gem);
-                    sfc->is_gpu_dirty = false;
-                } else {
-                    DRM_DEBUG_DRIVER("Surface %u doesn't need to be updated, ignoring update_gpu\n",
-                                     request.update_gpu->sfc_id);
-                    request.update_gpu->sfc_id = 0;
-                }
-            } else {
-                DRM_DEBUG_DRIVER("Surface %u not in VRAM, ignoring update_gpu\n",
-                                 request.update_gpu->sfc_id);
-                request.update_gpu->sfc_id = 0;
-            }
-            break;
-        case vigsp_cmd_copy:
-            sfc = vigs_surface_reserve(vigs_dev,
-                                       gem_list,
-                                       request.copy->dst_id);
-            if (!sfc) {
-                ret = -EINVAL;
-                break;
-            }
-            if (vigs_gem_in_vram(&sfc->gem)) {
-                sfc->is_gpu_dirty = true;
-            }
-            break;
-        case vigsp_cmd_solid_fill:
-            sfc = vigs_surface_reserve(vigs_dev,
-                                       gem_list,
-                                       request.solid_fill->sfc_id);
-            if (!sfc) {
-                ret = -EINVAL;
-                break;
-            }
-            if (vigs_gem_in_vram(&sfc->gem)) {
-                sfc->is_gpu_dirty = true;
-            }
-            break;
-        default:
-            break;
-        }
-
-        request_header =
-            (struct vigsp_cmd_request_header*)(request.data +
-                                               request_header->size);
-    }
-
-    return 0;
-}
-
-static void vigs_device_finish_patch_commands(struct list_head* gem_list)
-{
-    struct vigs_gem_object *gem, *gem_tmp;
-
-    list_for_each_entry_safe(gem, gem_tmp, gem_list, list) {
-        list_del(&gem->list);
-        vigs_gem_unreserve(gem);
-        drm_gem_object_unreference_unlocked(&gem->base);
-    }
-}
-
 int vigs_device_init(struct vigs_device *vigs_dev,
                      struct drm_device *drm_dev,
                      struct pci_dev *pci_dev,
@@ -352,12 +149,29 @@ int vigs_device_init(struct vigs_device *vigs_dev,
         goto fail2;
     }
 
+    vigs_dev->obj_dev = ttm_object_device_init(vigs_dev->mman->mem_global_ref.object,
+                                               12);
+
+    if (!vigs_dev->obj_dev) {
+        DRM_ERROR("Unable to initialize obj_dev\n");
+        ret = -ENOMEM;
+        goto fail3;
+    }
+
+    ret = vigs_fenceman_create(&vigs_dev->fenceman);
+
+    if (ret != 0) {
+        goto fail4;
+    }
+
     ret = vigs_comm_create(vigs_dev, &vigs_dev->comm);
 
     if (ret != 0) {
-        goto fail3;
+        goto fail5;
     }
 
+    spin_lock_init(&vigs_dev->irq_lock);
+
     drm_mode_config_init(vigs_dev->drm_dev);
 
     vigs_framebuffer_config_init(vigs_dev);
@@ -365,19 +179,19 @@ int vigs_device_init(struct vigs_device *vigs_dev,
     ret = vigs_crtc_init(vigs_dev);
 
     if (ret != 0) {
-        goto fail4;
+        goto fail6;
     }
 
     ret = vigs_output_init(vigs_dev);
 
     if (ret != 0) {
-        goto fail4;
+        goto fail6;
     }
 
     ret = drm_vblank_init(drm_dev, 1);
 
     if (ret != 0) {
-        goto fail4;
+        goto fail6;
     }
 
     /*
@@ -389,24 +203,28 @@ int vigs_device_init(struct vigs_device *vigs_dev,
     ret = drm_irq_install(drm_dev);
 
     if (ret != 0) {
-        goto fail5;
+        goto fail7;
     }
 
     ret = vigs_fbdev_create(vigs_dev, &vigs_dev->fbdev);
 
     if (ret != 0) {
-        goto fail6;
+        goto fail8;
     }
 
     return 0;
 
-fail6:
+fail8:
     drm_irq_uninstall(drm_dev);
-fail5:
+fail7:
     drm_vblank_cleanup(drm_dev);
-fail4:
+fail6:
     drm_mode_config_cleanup(vigs_dev->drm_dev);
     vigs_comm_destroy(vigs_dev->comm);
+fail5:
+    vigs_fenceman_destroy(vigs_dev->fenceman);
+fail4:
+    ttm_object_device_release(&vigs_dev->obj_dev);
 fail3:
     vigs_mman_destroy(vigs_dev->mman);
 fail2:
@@ -427,6 +245,8 @@ void vigs_device_cleanup(struct vigs_device *vigs_dev)
     drm_vblank_cleanup(vigs_dev->drm_dev);
     drm_mode_config_cleanup(vigs_dev->drm_dev);
     vigs_comm_destroy(vigs_dev->comm);
+    vigs_fenceman_destroy(vigs_dev->fenceman);
+    ttm_object_device_release(&vigs_dev->obj_dev);
     vigs_mman_destroy(vigs_dev->mman);
     drm_rmmap(vigs_dev->drm_dev, vigs_dev->io_map);
     idr_destroy(&vigs_dev->surface_idr);
@@ -481,6 +301,29 @@ void vigs_device_remove_surface(struct vigs_device *vigs_dev,
     mutex_unlock(&vigs_dev->surface_idr_mutex);
 }
 
+struct vigs_surface
+    *vigs_device_reference_surface(struct vigs_device *vigs_dev,
+                                   vigsp_surface_id sfc_id)
+{
+    struct vigs_surface *sfc;
+
+    mutex_lock(&vigs_dev->surface_idr_mutex);
+
+    sfc = idr_find(&vigs_dev->surface_idr, sfc_id);
+
+    if (sfc) {
+        if (vigs_gem_freed(&sfc->gem)) {
+            sfc = NULL;
+        } else {
+            drm_gem_object_reference(&sfc->gem.base);
+        }
+    }
+
+    mutex_unlock(&vigs_dev->surface_idr_mutex);
+
+    return sfc;
+}
+
 int vigs_device_add_surface_unlocked(struct vigs_device *vigs_dev,
                                      struct vigs_surface *sfc,
                                      vigsp_surface_id* id)
@@ -501,67 +344,3 @@ void vigs_device_remove_surface_unlocked(struct vigs_device *vigs_dev,
     vigs_device_remove_surface(vigs_dev, sfc_id);
     mutex_unlock(&vigs_dev->drm_dev->struct_mutex);
 }
-
-int vigs_device_exec_ioctl(struct drm_device *drm_dev,
-                           void *data,
-                           struct drm_file *file_priv)
-{
-    struct vigs_device *vigs_dev = drm_dev->dev_private;
-    struct drm_vigs_exec *args = data;
-    struct drm_gem_object *gem;
-    struct vigs_gem_object *vigs_gem;
-    struct vigs_execbuffer *execbuffer;
-    struct list_head gem_list;
-    int ret;
-
-    INIT_LIST_HEAD(&gem_list);
-
-    gem = drm_gem_object_lookup(drm_dev, file_priv, args->handle);
-
-    if (gem == NULL) {
-        return -ENOENT;
-    }
-
-    vigs_gem = gem_to_vigs_gem(gem);
-
-    if (vigs_gem->type != VIGS_GEM_TYPE_EXECBUFFER) {
-        drm_gem_object_unreference_unlocked(gem);
-        return -ENOENT;
-    }
-
-    execbuffer = vigs_gem_to_vigs_execbuffer(vigs_gem);
-
-    vigs_gem_reserve(vigs_gem);
-
-    /*
-     * Never unmap for optimization, but we got to be careful,
-     * worst case scenario is when whole RAM BAR is mapped into kernel.
-     */
-    ret = vigs_gem_kmap(vigs_gem);
-
-    if (ret != 0) {
-        vigs_gem_unreserve(vigs_gem);
-        drm_gem_object_unreference_unlocked(gem);
-        return ret;
-    }
-
-    vigs_gem_unreserve(vigs_gem);
-
-    ret = vigs_device_patch_commands(vigs_dev,
-                                     execbuffer->gem.kptr,
-                                     vigs_gem_size(&execbuffer->gem),
-                                     &gem_list);
-
-    if (ret != 0) {
-        vigs_device_finish_patch_commands(&gem_list);
-        drm_gem_object_unreference_unlocked(gem);
-        return ret;
-    }
-
-    vigs_comm_exec(vigs_dev->comm, execbuffer);
-
-    vigs_device_finish_patch_commands(&gem_list);
-    drm_gem_object_unreference_unlocked(gem);
-
-    return 0;
-}
index 368b1a961a76269a368567a220e2c912c28de8a7..1ea36dab6e9a1c93e740b536b585f12c1f40ac79 100644 (file)
@@ -5,6 +5,7 @@
 #include "vigs_protocol.h"
 
 struct vigs_mman;
+struct vigs_fenceman;
 struct vigs_comm;
 struct vigs_fbdev;
 struct vigs_surface;
@@ -34,10 +35,21 @@ struct vigs_device
 
     struct vigs_mman *mman;
 
+    struct ttm_object_device *obj_dev;
+
+    struct vigs_fenceman *fenceman;
+
     struct vigs_comm *comm;
 
     struct vigs_fbdev *fbdev;
 
+    /*
+     * We need this because it's essential to read 'lower' and 'upper'
+     * fence acks atomically in IRQ handler and on SMP systems IRQ handler
+     * can be run on several CPUs concurrently.
+     */
+    spinlock_t irq_lock;
+
     /*
      * A hack we're forced to have in order to tell if we
      * need to track GEM access or not in 'vigs_device_mmap'.
@@ -63,6 +75,10 @@ int vigs_device_add_surface(struct vigs_device *vigs_dev,
 void vigs_device_remove_surface(struct vigs_device *vigs_dev,
                                 vigsp_surface_id sfc_id);
 
+struct vigs_surface
+    *vigs_device_reference_surface(struct vigs_device *vigs_dev,
+                                   vigsp_surface_id sfc_id);
+
 /*
  * Locks drm_device::struct_mutex.
  * @{
@@ -79,17 +95,4 @@ void vigs_device_remove_surface_unlocked(struct vigs_device *vigs_dev,
  * @}
  */
 
-/*
- * IOCTLs
- * @{
- */
-
-int vigs_device_exec_ioctl(struct drm_device *drm_dev,
-                           void *data,
-                           struct drm_file *file_priv);
-
-/*
- * @}
- */
-
 #endif
index 73da60637c10c6c5c4f2b1f043c04b3bde9530ac..0c06d4386eb67bc6f7690eb19a0d663de8ad813e 100644 (file)
@@ -6,6 +6,9 @@
 #include "vigs_surface.h"
 #include "vigs_execbuffer.h"
 #include "vigs_irq.h"
+#include "vigs_fence.h"
+#include "vigs_file.h"
+#include "vigs_mman.h"
 #include "drmP.h"
 #include "drm.h"
 #include <linux/module.h>
@@ -42,16 +45,26 @@ static struct drm_ioctl_desc vigs_drm_ioctls[] =
                                               DRM_UNLOCKED | DRM_AUTH),
     DRM_IOCTL_DEF_DRV(VIGS_GEM_MAP, vigs_gem_map_ioctl,
                                     DRM_UNLOCKED | DRM_AUTH),
+    DRM_IOCTL_DEF_DRV(VIGS_GEM_WAIT, vigs_gem_wait_ioctl,
+                                     DRM_UNLOCKED | DRM_AUTH),
     DRM_IOCTL_DEF_DRV(VIGS_SURFACE_INFO, vigs_surface_info_ioctl,
                                          DRM_UNLOCKED | DRM_AUTH),
-    DRM_IOCTL_DEF_DRV(VIGS_EXEC, vigs_device_exec_ioctl,
+    DRM_IOCTL_DEF_DRV(VIGS_EXEC, vigs_execbuffer_exec_ioctl,
                                  DRM_UNLOCKED | DRM_AUTH),
     DRM_IOCTL_DEF_DRV(VIGS_SURFACE_SET_GPU_DIRTY, vigs_surface_set_gpu_dirty_ioctl,
                                                   DRM_UNLOCKED | DRM_AUTH),
     DRM_IOCTL_DEF_DRV(VIGS_SURFACE_START_ACCESS, vigs_surface_start_access_ioctl,
                                                  DRM_UNLOCKED | DRM_AUTH),
     DRM_IOCTL_DEF_DRV(VIGS_SURFACE_END_ACCESS, vigs_surface_end_access_ioctl,
-                                               DRM_UNLOCKED | DRM_AUTH)
+                                               DRM_UNLOCKED | DRM_AUTH),
+    DRM_IOCTL_DEF_DRV(VIGS_CREATE_FENCE, vigs_fence_create_ioctl,
+                                         DRM_UNLOCKED | DRM_AUTH),
+    DRM_IOCTL_DEF_DRV(VIGS_FENCE_WAIT, vigs_fence_wait_ioctl,
+                                       DRM_UNLOCKED | DRM_AUTH),
+    DRM_IOCTL_DEF_DRV(VIGS_FENCE_SIGNALED, vigs_fence_signaled_ioctl,
+                                           DRM_UNLOCKED | DRM_AUTH),
+    DRM_IOCTL_DEF_DRV(VIGS_FENCE_UNREF, vigs_fence_unref_ioctl,
+                                        DRM_UNLOCKED | DRM_AUTH)
 };
 
 static const struct file_operations vigs_drm_driver_fops =
@@ -110,6 +123,29 @@ static int vigs_drm_unload(struct drm_device *dev)
     return 0;
 }
 
+static int vigs_drm_open(struct drm_device *dev, struct drm_file *file_priv)
+{
+    int ret = 0;
+    struct vigs_device *vigs_dev = dev->dev_private;
+    struct vigs_file *vigs_file;
+
+    DRM_DEBUG_DRIVER("enter\n");
+
+    ret = vigs_file_create(vigs_dev, &vigs_file);
+
+    if (ret != 0) {
+        return ret;
+    }
+
+    file_priv->driver_priv = vigs_file;
+
+    if (unlikely(vigs_dev->mman->bo_dev.dev_mapping == NULL)) {
+        vigs_dev->mman->bo_dev.dev_mapping =
+            file_priv->filp->f_path.dentry->d_inode->i_mapping;
+    }
+
+    return 0;
+}
 
 static void vigs_drm_preclose(struct drm_device *dev,
                               struct drm_file *file_priv)
@@ -137,7 +173,13 @@ static void vigs_drm_preclose(struct drm_device *dev,
 static void vigs_drm_postclose(struct drm_device *dev,
                                struct drm_file *file_priv)
 {
+    struct vigs_file *vigs_file = file_priv->driver_priv;
+
     DRM_DEBUG_DRIVER("enter\n");
+
+    vigs_file_destroy(vigs_file);
+
+    file_priv->driver_priv = NULL;
 }
 
 static void vigs_drm_lastclose(struct drm_device *dev)
@@ -159,6 +201,7 @@ static struct drm_driver vigs_drm_driver =
                        DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
     .load = vigs_drm_load,
     .unload = vigs_drm_unload,
+    .open = vigs_drm_open,
     .preclose = vigs_drm_preclose,
     .postclose = vigs_drm_postclose,
     .lastclose = vigs_drm_lastclose,
index a71c8363da986fb5894243417f3bbb59905008d0..32b508aed5fb3e96924ab44ec44c5c4ad23dbfab 100644 (file)
@@ -1,6 +1,62 @@
 #include "vigs_execbuffer.h"
+#include "vigs_device.h"
+#include "vigs_surface.h"
+#include "vigs_comm.h"
+#include "vigs_fence.h"
 #include <drm/vigs_drm.h>
 
+/*
+ * Overlay of per-command request payload pointers; 'data' aliases all of
+ * them, so a single pointer can be reinterpreted per command type.
+ */
+union vigs_request
+{
+    struct vigsp_cmd_update_vram_request *update_vram;
+    struct vigsp_cmd_update_gpu_request *update_gpu;
+    struct vigsp_cmd_copy_request *copy;
+    struct vigsp_cmd_solid_fill_request *solid_fill;
+    void *data;
+};
+
+/*
+ * Look up the surface for 'sfc_id' (takes a GEM reference), record the
+ * command context in 'buffer' and queue its BO on 'list' for TTM
+ * reservation -- unless the same BO is already queued (duplicates stay
+ * in the caller's buffers array but appear once on the reservation list).
+ * The GEM reference is dropped later by vigs_execbuffer_clear_validation(),
+ * which runs for every array entry including duplicates.
+ */
+static int vigs_execbuffer_validate_buffer(struct vigs_device *vigs_dev,
+                                           struct vigs_validate_buffer *buffer,
+                                           struct list_head* list,
+                                           vigsp_surface_id sfc_id,
+                                           vigsp_cmd cmd,
+                                           int which,
+                                           void *data)
+{
+    struct vigs_surface *sfc = vigs_device_reference_surface(vigs_dev, sfc_id);
+    struct vigs_validate_buffer *tmp;
+
+    if (!sfc) {
+        DRM_ERROR("Surface %u not found\n", sfc_id);
+        return -EINVAL;
+    }
+
+    buffer->base.new_sync_obj_arg = NULL;
+    buffer->base.bo = &sfc->gem.bo;
+    buffer->cmd = cmd;
+    buffer->which = which;
+    buffer->data = data;
+
+    list_for_each_entry(tmp, list, base.head) {
+        if (tmp->base.bo == buffer->base.bo) {
+            /*
+             * Already on the list, we're done.
+             */
+            return 0;
+        }
+    }
+
+    list_add_tail(&buffer->base.head, list);
+
+    return 0;
+}
+
+/* Drop the surface GEM reference taken during validation. */
+static void vigs_execbuffer_clear_validation(struct vigs_validate_buffer *buffer)
+{
+    struct vigs_gem_object *gem = bo_to_vigs_gem(buffer->base.bo);
+
+    drm_gem_object_unreference(&gem->base);
+}
+
 static void vigs_execbuffer_destroy(struct vigs_gem_object *gem)
 {
 }
@@ -38,6 +94,279 @@ fail1:
     return ret;
 }
 
+/*
+ * Parse the command batch inside the execbuffer GEM and build the data
+ * needed to fence it:
+ *  - '*buffers': one vigs_validate_buffer per surface reference
+ *    (duplicates included, each holding a GEM reference),
+ *  - 'list': the de-duplicated TTM reservation list,
+ *  - '*sync': true if the batch must be waited upon (VRAM transfers).
+ * Pass 1 bounds-checks every request header/payload against the GEM size
+ * and counts buffers; pass 2 (under struct_mutex, which guards surface
+ * lookup) takes the references and fills the array/list.
+ * On success the caller must eventually call
+ * vigs_execbuffer_clear_validations(); on failure everything is undone
+ * here and '*buffers' is set to NULL.
+ */
+int vigs_execbuffer_validate_buffers(struct vigs_execbuffer *execbuffer,
+                                     struct list_head* list,
+                                     struct vigs_validate_buffer **buffers,
+                                     int *num_buffers,
+                                     bool *sync)
+{
+    struct vigs_device *vigs_dev = execbuffer->gem.base.dev->dev_private;
+    void *data = execbuffer->gem.kptr;
+    u32 data_size = vigs_gem_size(&execbuffer->gem);
+    struct vigsp_cmd_batch_header *batch_header = data;
+    struct vigsp_cmd_request_header *request_header =
+        (struct vigsp_cmd_request_header*)(batch_header + 1);
+    union vigs_request request;
+    int num_commands = 0, ret = 0;
+
+    *num_buffers = 0;
+    *sync = false;
+
+    /*
+     * GEM is always at least PAGE_SIZE long, so don't check
+     * if batch header is out of bounds.
+     */
+
+    /* Pass 1: validate bounds and count surface references per command. */
+    while ((void*)request_header <
+           ((void*)(batch_header + 1) + batch_header->size)) {
+        if (((void*)(request_header) + sizeof(*request_header)) >
+            (data + data_size)) {
+            DRM_ERROR("request header outside of GEM\n");
+            ret = -EINVAL;
+            goto fail1;
+        }
+
+        if (((void*)(request_header + 1) + request_header->size) >
+            (data + data_size)) {
+            DRM_ERROR("request data outside of GEM\n");
+            ret = -EINVAL;
+            goto fail1;
+        }
+
+        request.data = (request_header + 1);
+
+        switch (request_header->cmd) {
+        case vigsp_cmd_update_vram:
+        case vigsp_cmd_update_gpu:
+            *sync = true;
+            *num_buffers += 1;
+            break;
+        case vigsp_cmd_copy:
+            *num_buffers += 2;
+            break;
+        case vigsp_cmd_solid_fill:
+            *num_buffers += 1;
+            break;
+        default:
+            break;
+        }
+
+        request_header =
+            (struct vigsp_cmd_request_header*)(request.data +
+                                               request_header->size);
+
+        ++num_commands;
+    }
+
+    *buffers = kmalloc(*num_buffers * sizeof(**buffers), GFP_KERNEL);
+
+    if (!*buffers) {
+        ret = -ENOMEM;
+        goto fail1;
+    }
+
+    request_header = (struct vigsp_cmd_request_header*)(batch_header + 1);
+
+    mutex_lock(&vigs_dev->drm_dev->struct_mutex);
+
+    *num_buffers = 0;
+
+    /* Pass 2: reference surfaces and fill the array/reservation list. */
+    while (--num_commands >= 0) {
+        request.data = (request_header + 1);
+
+        switch (request_header->cmd) {
+        case vigsp_cmd_update_vram:
+            ret = vigs_execbuffer_validate_buffer(vigs_dev,
+                                                  &(*buffers)[*num_buffers],
+                                                  list,
+                                                  request.update_vram->sfc_id,
+                                                  request_header->cmd,
+                                                  0,
+                                                  request.data);
+
+            if (ret != 0) {
+                goto fail2;
+            }
+
+            ++*num_buffers;
+
+            break;
+        case vigsp_cmd_update_gpu:
+            ret = vigs_execbuffer_validate_buffer(vigs_dev,
+                                                  &(*buffers)[*num_buffers],
+                                                  list,
+                                                  request.update_gpu->sfc_id,
+                                                  request_header->cmd,
+                                                  0,
+                                                  request.data);
+
+            if (ret != 0) {
+                goto fail2;
+            }
+
+            ++*num_buffers;
+
+            break;
+        case vigsp_cmd_copy:
+            /* Copy references two surfaces: 'which' 0 = src, 1 = dst. */
+            ret = vigs_execbuffer_validate_buffer(vigs_dev,
+                                                  &(*buffers)[*num_buffers],
+                                                  list,
+                                                  request.copy->src_id,
+                                                  request_header->cmd,
+                                                  0,
+                                                  request.data);
+
+            if (ret != 0) {
+                goto fail2;
+            }
+
+            ++*num_buffers;
+
+            ret = vigs_execbuffer_validate_buffer(vigs_dev,
+                                                  &(*buffers)[*num_buffers],
+                                                  list,
+                                                  request.copy->dst_id,
+                                                  request_header->cmd,
+                                                  1,
+                                                  request.data);
+
+            if (ret != 0) {
+                goto fail2;
+            }
+
+            ++*num_buffers;
+
+            break;
+        case vigsp_cmd_solid_fill:
+            ret = vigs_execbuffer_validate_buffer(vigs_dev,
+                                                  &(*buffers)[*num_buffers],
+                                                  list,
+                                                  request.solid_fill->sfc_id,
+                                                  request_header->cmd,
+                                                  0,
+                                                  request.data);
+
+            if (ret != 0) {
+                goto fail2;
+            }
+
+            ++*num_buffers;
+
+            break;
+        default:
+            break;
+        }
+
+        request_header =
+            (struct vigsp_cmd_request_header*)(request.data +
+                                               request_header->size);
+    }
+
+    mutex_unlock(&vigs_dev->drm_dev->struct_mutex);
+
+    return 0;
+
+fail2:
+    /* Undo the references taken so far (still under struct_mutex). */
+    while (--*num_buffers >= 0) {
+        vigs_execbuffer_clear_validation(&(*buffers)[*num_buffers]);
+    }
+    mutex_unlock(&vigs_dev->drm_dev->struct_mutex);
+    kfree(*buffers);
+fail1:
+    *buffers = NULL;
+
+    return ret;
+}
+
+/*
+ * Patch the batch in place now that all surfaces are reserved:
+ *  - update_vram/update_gpu: fill in the surface's current VRAM offset,
+ *    or set sfc_id to 0 (the "skip" marker) when the surface is not in
+ *    VRAM or needs no transfer;
+ *  - copy (dst only) / solid_fill: mark VRAM-resident targets GPU-dirty
+ *    so a later update_vram knows a readback is needed.
+ * Must be called after validation and before submitting the execbuffer.
+ */
+void vigs_execbuffer_process_buffers(struct vigs_execbuffer *execbuffer,
+                                     struct vigs_validate_buffer *buffers,
+                                     int num_buffers)
+{
+    union vigs_request request;
+    struct vigs_gem_object *gem;
+    struct vigs_surface *sfc;
+    int i;
+
+    for (i = 0; i < num_buffers; ++i) {
+        request.data = buffers[i].data;
+        gem = bo_to_vigs_gem(buffers[i].base.bo);
+        sfc = vigs_gem_to_vigs_surface(gem);
+
+        switch (buffers[i].cmd) {
+        case vigsp_cmd_update_vram:
+            if (vigs_gem_in_vram(&sfc->gem)) {
+                if (vigs_surface_need_vram_update(sfc)) {
+                    request.update_vram->offset = vigs_gem_offset(&sfc->gem);
+                    sfc->is_gpu_dirty = false;
+                } else {
+                    DRM_DEBUG_DRIVER("Surface %u doesn't need to be updated, ignoring update_vram\n",
+                                     request.update_vram->sfc_id);
+                    request.update_vram->sfc_id = 0;
+                }
+            } else {
+                DRM_DEBUG_DRIVER("Surface %u not in VRAM, ignoring update_vram\n",
+                                 request.update_vram->sfc_id);
+                request.update_vram->sfc_id = 0;
+            }
+            break;
+        case vigsp_cmd_update_gpu:
+            if (vigs_gem_in_vram(&sfc->gem)) {
+                if (vigs_surface_need_gpu_update(sfc)) {
+                    request.update_gpu->offset = vigs_gem_offset(&sfc->gem);
+                    sfc->is_gpu_dirty = false;
+                } else {
+                    DRM_DEBUG_DRIVER("Surface %u doesn't need to be updated, ignoring update_gpu\n",
+                                     request.update_gpu->sfc_id);
+                    request.update_gpu->sfc_id = 0;
+                }
+            } else {
+                DRM_DEBUG_DRIVER("Surface %u not in VRAM, ignoring update_gpu\n",
+                                 request.update_gpu->sfc_id);
+                request.update_gpu->sfc_id = 0;
+            }
+            break;
+        case vigsp_cmd_copy:
+            /* 'which' == 1 is the copy destination (see validation). */
+            if (buffers[i].which && vigs_gem_in_vram(&sfc->gem)) {
+                sfc->is_gpu_dirty = true;
+            }
+            break;
+        case vigsp_cmd_solid_fill:
+            if (vigs_gem_in_vram(&sfc->gem)) {
+                sfc->is_gpu_dirty = true;
+            }
+            break;
+        default:
+            break;
+        }
+    }
+}
+
+/*
+ * Stamp the batch header with the fence's sequence number so the host
+ * acks this fence once the batch has been processed.
+ */
+void vigs_execbuffer_fence(struct vigs_execbuffer *execbuffer,
+                           struct vigs_fence *fence)
+{
+    struct vigsp_cmd_batch_header *batch_header = execbuffer->gem.kptr;
+
+    batch_header->fence_seq = fence->seq;
+}
+
+/*
+ * Drop every surface reference taken by
+ * vigs_execbuffer_validate_buffers() and free the buffers array.
+ * struct_mutex is held to match the locking used when the references
+ * were taken.
+ */
+void vigs_execbuffer_clear_validations(struct vigs_execbuffer *execbuffer,
+                                       struct vigs_validate_buffer *buffers,
+                                       int num_buffers)
+{
+    struct vigs_device *vigs_dev = execbuffer->gem.base.dev->dev_private;
+    int i;
+
+    mutex_lock(&vigs_dev->drm_dev->struct_mutex);
+
+    for (i = 0; i < num_buffers; ++i) {
+        vigs_execbuffer_clear_validation(&buffers[i]);
+    }
+
+    mutex_unlock(&vigs_dev->drm_dev->struct_mutex);
+
+    kfree(buffers);
+}
+
 int vigs_execbuffer_create_ioctl(struct drm_device *drm_dev,
                                  void *data,
                                  struct drm_file *file_priv)
@@ -70,3 +399,102 @@ int vigs_execbuffer_create_ioctl(struct drm_device *drm_dev,
 
     return ret;
 }
+
+/*
+ * DRM_VIGS_EXEC ioctl: submit an execbuffer GEM to the host.
+ * Flow: look up + kmap the execbuffer, validate/reference all surfaces
+ * it touches, reserve them via TTM execbuf utils, fence the batch,
+ * submit, attach the fence to the reserved BOs, and (for VRAM
+ * transfers) wait for completion before returning.
+ */
+int vigs_execbuffer_exec_ioctl(struct drm_device *drm_dev,
+                               void *data,
+                               struct drm_file *file_priv)
+{
+    struct vigs_device *vigs_dev = drm_dev->dev_private;
+    struct drm_vigs_exec *args = data;
+    struct drm_gem_object *gem;
+    struct vigs_gem_object *vigs_gem;
+    struct vigs_execbuffer *execbuffer;
+    struct list_head list;
+    struct vigs_validate_buffer *buffers;
+    int num_buffers = 0;
+    struct vigs_fence *fence = NULL;
+    bool sync = false;
+    int ret = 0;
+
+    INIT_LIST_HEAD(&list);
+
+    gem = drm_gem_object_lookup(drm_dev, file_priv, args->handle);
+
+    if (gem == NULL) {
+        ret = -ENOENT;
+        goto out1;
+    }
+
+    vigs_gem = gem_to_vigs_gem(gem);
+
+    if (vigs_gem->type != VIGS_GEM_TYPE_EXECBUFFER) {
+        ret = -ENOENT;
+        goto out2;
+    }
+
+    execbuffer = vigs_gem_to_vigs_execbuffer(vigs_gem);
+
+    vigs_gem_reserve(vigs_gem);
+
+    /*
+     * Never unmap for optimization, but we got to be careful,
+     * worst case scenario is when whole RAM BAR is mapped into kernel.
+     */
+    ret = vigs_gem_kmap(vigs_gem);
+
+    if (ret != 0) {
+        vigs_gem_unreserve(vigs_gem);
+        goto out2;
+    }
+
+    vigs_gem_unreserve(vigs_gem);
+
+    ret = vigs_execbuffer_validate_buffers(execbuffer,
+                                           &list,
+                                           &buffers,
+                                           &num_buffers,
+                                           &sync);
+
+    if (ret != 0) {
+        goto out2;
+    }
+
+    if (list_empty(&list)) {
+        /*
+         * No surfaces referenced: submit unfenced.
+         * NOTE(review): vigs_comm_exec()'s return value is ignored here
+         * and below -- presumably it cannot fail meaningfully; confirm.
+         */
+        vigs_comm_exec(vigs_dev->comm, execbuffer);
+    } else {
+        ret = ttm_eu_reserve_buffers(&list);
+
+        if (ret != 0) {
+            ttm_eu_backoff_reservation(&list);
+            goto out3;
+        }
+
+        ret = vigs_fence_create(vigs_dev->fenceman, &fence);
+
+        if (ret != 0) {
+            ttm_eu_backoff_reservation(&list);
+            goto out3;
+        }
+
+        vigs_execbuffer_process_buffers(execbuffer, buffers, num_buffers);
+
+        vigs_execbuffer_fence(execbuffer, fence);
+
+        vigs_comm_exec(vigs_dev->comm, execbuffer);
+
+        /* Hands the fence to TTM as each BO's sync object and unreserves. */
+        ttm_eu_fence_buffer_objects(&list, fence);
+
+        /* VRAM transfers must complete before user space proceeds. */
+        if (sync) {
+            vigs_fence_wait(fence, false);
+        }
+
+        vigs_fence_unref(fence);
+    }
+
+out3:
+    vigs_execbuffer_clear_validations(execbuffer, buffers, num_buffers);
+out2:
+    drm_gem_object_unreference_unlocked(gem);
+out1:
+    return ret;
+}
index 13bb183fe7a4711eb4a513c7bbabb6777f1d16e5..96af8a1ba1b745d0d0f99092799bec9a57de9569 100644 (file)
@@ -3,6 +3,21 @@
 
 #include "drmP.h"
 #include "vigs_gem.h"
+#include "vigs_protocol.h"
+#include <ttm/ttm_execbuf_util.h>
+
+struct vigs_fence;
+
+/*
+ * Per-surface validation entry built while parsing an execbuffer batch.
+ */
+struct vigs_validate_buffer
+{
+    /* TTM reservation entry; base.bo is the surface's buffer object. */
+    struct ttm_validate_buffer base;
+
+    /* Command that references this surface. */
+    vigsp_cmd cmd;
+
+    /* Role within the command: 0 = source/only surface, 1 = copy dst. */
+    int which;
+
+    /* Points at the command's request payload inside the execbuffer. */
+    void *data;
+};
 
 struct vigs_execbuffer
 {
@@ -22,6 +37,23 @@ int vigs_execbuffer_create(struct vigs_device *vigs_dev,
                            bool kernel,
                            struct vigs_execbuffer **execbuffer);
 
+int vigs_execbuffer_validate_buffers(struct vigs_execbuffer *execbuffer,
+                                     struct list_head* list,
+                                     struct vigs_validate_buffer **buffers,
+                                     int *num_buffers,
+                                     bool *sync);
+
+void vigs_execbuffer_process_buffers(struct vigs_execbuffer *execbuffer,
+                                     struct vigs_validate_buffer *buffers,
+                                     int num_buffers);
+
+void vigs_execbuffer_fence(struct vigs_execbuffer *execbuffer,
+                           struct vigs_fence *fence);
+
+void vigs_execbuffer_clear_validations(struct vigs_execbuffer *execbuffer,
+                                       struct vigs_validate_buffer *buffers,
+                                       int num_buffers);
+
 /*
  * IOCTLs
  * @{
@@ -31,6 +63,10 @@ int vigs_execbuffer_create_ioctl(struct drm_device *drm_dev,
                                  void *data,
                                  struct drm_file *file_priv);
 
+int vigs_execbuffer_exec_ioctl(struct drm_device *drm_dev,
+                               void *data,
+                               struct drm_file *file_priv);
+
 /*
  * @}
  */
diff --git a/drivers/gpu/drm/vigs/vigs_fence.c b/drivers/gpu/drm/vigs/vigs_fence.c
new file mode 100644 (file)
index 0000000..fbbfe14
--- /dev/null
@@ -0,0 +1,305 @@
+#include "vigs_fence.h"
+#include "vigs_fenceman.h"
+#include "vigs_file.h"
+#include "vigs_device.h"
+#include "vigs_comm.h"
+#include <drm/vigs_drm.h>
+
+/* No per-fence resources yet; kept as a hook mirroring vigs_fence_init(). */
+static void vigs_fence_cleanup(struct vigs_fence *fence)
+{
+}
+
+/* 'destroy' callback for plain fences made by vigs_fence_create(). */
+static void vigs_fence_destroy(struct vigs_fence *fence)
+{
+    vigs_fence_cleanup(fence);
+    kfree(fence);
+}
+
+/* 'destroy' callback for user-visible fences; frees the whole wrapper. */
+static void vigs_user_fence_destroy(struct vigs_fence *fence)
+{
+    struct vigs_user_fence *user_fence = vigs_fence_to_vigs_user_fence(fence);
+
+    vigs_fence_cleanup(&user_fence->fence);
+    kfree(user_fence);
+}
+
+/*
+ * kref release callback. Runs with fenceman->lock held (see
+ * vigs_fence_unref()), which protects the pending-list removal.
+ */
+static void vigs_fence_release_locked(struct kref *kref)
+{
+    struct vigs_fence *fence = kref_to_vigs_fence(kref);
+
+    DRM_DEBUG_DRIVER("Fence destroyed (seq = %u, signaled = %u)\n",
+                     fence->seq,
+                     fence->signaled);
+
+    list_del_init(&fence->list);
+    fence->destroy(fence);
+}
+
+/*
+ * TTM base-object release: drops the extra fence reference taken in
+ * vigs_user_fence_create() on behalf of the base object.
+ */
+static void vigs_user_fence_refcount_release(struct ttm_base_object **base)
+{
+    struct ttm_base_object *tmp = *base;
+    struct vigs_user_fence *user_fence = base_to_vigs_user_fence(tmp);
+
+    vigs_fence_unref(&user_fence->fence);
+    *base = NULL;
+}
+
+/*
+ * Common init: refcount of 1, next non-zero sequence number, and
+ * insertion into the fence manager's pending list, all under the
+ * fence manager's lock.
+ */
+static void vigs_fence_init(struct vigs_fence *fence,
+                            struct vigs_fenceman *fenceman,
+                            void (*destroy)(struct vigs_fence*))
+{
+    unsigned long flags;
+
+    kref_init(&fence->kref);
+    INIT_LIST_HEAD(&fence->list);
+    fence->fenceman = fenceman;
+    fence->signaled = false;
+    init_waitqueue_head(&fence->wait);
+    fence->destroy = destroy;
+
+    spin_lock_irqsave(&fenceman->lock, flags);
+
+    fence->seq = vigs_fence_seq_next(fenceman->seq);
+    fenceman->seq = fence->seq;
+
+    list_add_tail(&fence->list, &fenceman->fence_list);
+
+    spin_unlock_irqrestore(&fenceman->lock, flags);
+
+    DRM_DEBUG_DRIVER("Fence created (seq = %u)\n", fence->seq);
+}
+
+/*
+ * Create a kernel-internal fence (no user-space handle).
+ * Returns 0 and a fence with one reference, or -ENOMEM.
+ */
+int vigs_fence_create(struct vigs_fenceman *fenceman,
+                      struct vigs_fence **fence)
+{
+    int ret = 0;
+
+    *fence = kzalloc(sizeof(**fence), GFP_KERNEL);
+
+    if (!*fence) {
+        ret = -ENOMEM;
+        goto fail1;
+    }
+
+    vigs_fence_init(*fence, fenceman, &vigs_fence_destroy);
+
+    return 0;
+
+fail1:
+    *fence = NULL;
+
+    return ret;
+}
+
+/*
+ * Create a fence exported to user space via the client's TTM object
+ * file. On success the fence holds two references: the creation
+ * reference (for the caller) and one owned by the TTM base object,
+ * released through vigs_user_fence_refcount_release(). '*handle' is
+ * the user-space handle for the fence ioctls.
+ */
+int vigs_user_fence_create(struct vigs_fenceman *fenceman,
+                           struct drm_file *file_priv,
+                           struct vigs_user_fence **user_fence,
+                           uint32_t *handle)
+{
+    struct vigs_file *vigs_file = file_priv->driver_priv;
+    int ret = 0;
+
+    *user_fence = kzalloc(sizeof(**user_fence), GFP_KERNEL);
+
+    if (!*user_fence) {
+        ret = -ENOMEM;
+        goto fail1;
+    }
+
+    vigs_fence_init(&(*user_fence)->fence, fenceman, &vigs_user_fence_destroy);
+
+    ret = ttm_base_object_init(vigs_file->obj_file,
+                               &(*user_fence)->base, false,
+                               VIGS_FENCE_TYPE,
+                               &vigs_user_fence_refcount_release,
+                               NULL);
+
+    if (ret != 0) {
+        goto fail2;
+    }
+
+    /*
+     * For ttm_base_object.
+     */
+    vigs_fence_ref(&(*user_fence)->fence);
+
+    *handle = (*user_fence)->base.hash.key;
+
+    return 0;
+
+fail2:
+    vigs_fence_cleanup(&(*user_fence)->fence);
+    kfree(*user_fence);
+fail1:
+    *user_fence = NULL;
+
+    return ret;
+}
+
+/*
+ * Block until the fence is signaled by vigs_fenceman_ack().
+ * Returns 0 once signaled; when 'interruptible', may return
+ * -ERESTARTSYS if a signal interrupts the wait.
+ */
+int vigs_fence_wait(struct vigs_fence *fence, bool interruptible)
+{
+    long ret = 0;
+
+    if (vigs_fence_signaled(fence)) {
+        DRM_DEBUG_DRIVER("Fence wait (seq = %u, signaled = %u)\n",
+                         fence->seq,
+                         fence->signaled);
+        return 0;
+    }
+
+    DRM_DEBUG_DRIVER("Fence wait (seq = %u)\n", fence->seq);
+
+    if (interruptible) {
+        ret = wait_event_interruptible(fence->wait, vigs_fence_signaled(fence));
+    } else {
+        wait_event(fence->wait, vigs_fence_signaled(fence));
+    }
+
+    if (ret != 0) {
+        DRM_INFO("Fence wait interrupted (seq = %u) = %ld\n", fence->seq, ret);
+    } else {
+        DRM_DEBUG_DRIVER("Fence wait done (seq = %u)\n", fence->seq);
+    }
+
+    return ret;
+}
+
+/* Read 'signaled' under the fence manager's lock (set from IRQ context). */
+bool vigs_fence_signaled(struct vigs_fence *fence)
+{
+    unsigned long flags;
+    bool signaled;
+
+    spin_lock_irqsave(&fence->fenceman->lock, flags);
+
+    signaled = fence->signaled;
+
+    spin_unlock_irqrestore(&fence->fenceman->lock, flags);
+
+    return signaled;
+}
+
+void vigs_fence_ref(struct vigs_fence *fence)
+{
+    if (unlikely(!fence)) {
+        return;
+    }
+
+    kref_get(&fence->kref);
+}
+
+/*
+ * Drop a reference. The fence manager's lock is held across kref_put
+ * so the release callback (vigs_fence_release_locked) can safely
+ * unlink the fence from the pending list.
+ */
+void vigs_fence_unref(struct vigs_fence *fence)
+{
+    struct vigs_fenceman *fenceman;
+
+    if (unlikely(!fence)) {
+        return;
+    }
+
+    fenceman = fence->fenceman;
+
+    spin_lock_irq(&fenceman->lock);
+    BUG_ON(atomic_read(&fence->kref.refcount) == 0);
+    kref_put(&fence->kref, vigs_fence_release_locked);
+    spin_unlock_irq(&fenceman->lock);
+}
+
+/*
+ * DRM_VIGS_CREATE_FENCE ioctl: create a user fence and optionally
+ * (args->send) submit it to the host immediately. On return the
+ * handle's TTM reference keeps the fence alive; the creation
+ * reference is dropped at 'out'.
+ */
+int vigs_fence_create_ioctl(struct drm_device *drm_dev,
+                            void *data,
+                            struct drm_file *file_priv)
+{
+    struct vigs_device *vigs_dev = drm_dev->dev_private;
+    struct vigs_file *vigs_file = file_priv->driver_priv;
+    struct drm_vigs_create_fence *args = data;
+    struct vigs_user_fence *user_fence;
+    uint32_t handle;
+    int ret;
+
+    ret = vigs_user_fence_create(vigs_dev->fenceman,
+                                 file_priv,
+                                 &user_fence,
+                                 &handle);
+
+    if (ret != 0) {
+        goto out;
+    }
+
+    if (args->send) {
+        ret = vigs_comm_fence(vigs_dev->comm, &user_fence->fence);
+
+        if (ret != 0) {
+            /* Drop the user handle's reference; 'out' drops ours. */
+            ttm_ref_object_base_unref(vigs_file->obj_file,
+                                      handle,
+                                      TTM_REF_USAGE);
+            goto out;
+        }
+    }
+
+    args->handle = handle;
+    args->seq = user_fence->fence.seq;
+
+out:
+    vigs_fence_unref(&user_fence->fence);
+
+    return ret;
+}
+
+/*
+ * DRM_VIGS_FENCE_WAIT ioctl: interruptible wait on the fence behind
+ * 'args->handle'. Returns -ENOENT for an unknown handle.
+ */
+int vigs_fence_wait_ioctl(struct drm_device *drm_dev,
+                          void *data,
+                          struct drm_file *file_priv)
+{
+    struct vigs_file *vigs_file = file_priv->driver_priv;
+    struct drm_vigs_fence_wait *args = data;
+    struct ttm_base_object *base;
+    struct vigs_user_fence *user_fence;
+    int ret;
+
+    base = ttm_base_object_lookup(vigs_file->obj_file, args->handle);
+
+    if (!base) {
+        return -ENOENT;
+    }
+
+    user_fence = base_to_vigs_user_fence(base);
+
+    ret = vigs_fence_wait(&user_fence->fence, true);
+
+    ttm_base_object_unref(&base);
+
+    return ret;
+}
+
+/* DRM_VIGS_FENCE_SIGNALED ioctl: non-blocking signaled query. */
+int vigs_fence_signaled_ioctl(struct drm_device *drm_dev,
+                              void *data,
+                              struct drm_file *file_priv)
+{
+    struct vigs_file *vigs_file = file_priv->driver_priv;
+    struct drm_vigs_fence_signaled *args = data;
+    struct ttm_base_object *base;
+    struct vigs_user_fence *user_fence;
+
+    base = ttm_base_object_lookup(vigs_file->obj_file, args->handle);
+
+    if (!base) {
+        return -ENOENT;
+    }
+
+    user_fence = base_to_vigs_user_fence(base);
+
+    args->signaled = vigs_fence_signaled(&user_fence->fence);
+
+    ttm_base_object_unref(&base);
+
+    return 0;
+}
+
+/*
+ * DRM_VIGS_FENCE_UNREF ioctl: drop the user-space handle; the fence
+ * itself is destroyed once all kernel references are gone too.
+ */
+int vigs_fence_unref_ioctl(struct drm_device *drm_dev,
+                           void *data,
+                           struct drm_file *file_priv)
+{
+    struct vigs_file *vigs_file = file_priv->driver_priv;
+    struct drm_vigs_fence_unref *args = data;
+
+    return ttm_ref_object_base_unref(vigs_file->obj_file,
+                                     args->handle,
+                                     TTM_REF_USAGE);
+}
diff --git a/drivers/gpu/drm/vigs/vigs_fence.h b/drivers/gpu/drm/vigs/vigs_fence.h
new file mode 100644 (file)
index 0000000..c0c41be
--- /dev/null
@@ -0,0 +1,123 @@
+#ifndef _VIGS_FENCE_H_
+#define _VIGS_FENCE_H_
+
+#include "drmP.h"
+#include <ttm/ttm_object.h>
+
+#define VIGS_FENCE_TYPE ttm_driver_type2
+
+struct vigs_fenceman;
+
+struct vigs_fence
+{
+    struct kref kref;
+
+    /* Entry in vigs_fenceman::fence_list while pending. */
+    struct list_head list;
+
+    struct vigs_fenceman *fenceman;
+
+    /* Non-zero sequence number assigned in vigs_fence_init(). */
+    uint32_t seq;
+
+    /* Set by vigs_fenceman_ack() under fenceman->lock. */
+    bool signaled;
+
+    /* Woken by vigs_fenceman_ack() when 'signaled' becomes true. */
+    wait_queue_head_t wait;
+
+    /* Type-specific destructor, invoked on final kref release. */
+    void (*destroy)(struct vigs_fence *fence);
+};
+
+/*
+ * Users can access fences via TTM base object mechanism,
+ * thus, we need to wrap vigs_fence into vigs_user_fence because
+ * not every fence object needs to be referenced from user space.
+ * So no point in always having struct ttm_base_object inside vigs_fence.
+ */
+
+struct vigs_user_fence
+{
+    struct ttm_base_object base;
+
+    struct vigs_fence fence;
+};
+
+static inline struct vigs_fence *kref_to_vigs_fence(struct kref *kref)
+{
+    return container_of(kref, struct vigs_fence, kref);
+}
+
+static inline struct vigs_user_fence *vigs_fence_to_vigs_user_fence(struct vigs_fence *fence)
+{
+    return container_of(fence, struct vigs_user_fence, fence);
+}
+
+static inline struct vigs_user_fence *base_to_vigs_user_fence(struct ttm_base_object *base)
+{
+    return container_of(base, struct vigs_user_fence, base);
+}
+
+/* Sequence 0 is reserved ("don't fence"); skip it when wrapping. */
+static inline uint32_t vigs_fence_seq_next(uint32_t seq)
+{
+    if (++seq == 0) {
+        ++seq;
+    }
+    return seq;
+}
+
+/* Wrap-safe sequence comparisons (same idiom as the jiffies time_after()). */
+#define vigs_fence_seq_num_after(a, b) \
+    (typecheck(u32, a) && typecheck(u32, b) && ((s32)(b) - (s32)(a) < 0))
+
+#define vigs_fence_seq_num_before(a, b) vigs_fence_seq_num_after(b, a)
+
+#define vigs_fence_seq_num_after_eq(a, b)  \
+    ( typecheck(u32, a) && typecheck(u32, b) && \
+      ((s32)(a) - (s32)(b) >= 0) )
+
+#define vigs_fence_seq_num_before_eq(a, b) vigs_fence_seq_num_after_eq(b, a)
+
+int vigs_fence_create(struct vigs_fenceman *fenceman,
+                      struct vigs_fence **fence);
+
+int vigs_user_fence_create(struct vigs_fenceman *fenceman,
+                           struct drm_file *file_priv,
+                           struct vigs_user_fence **user_fence,
+                           uint32_t *handle);
+
+int vigs_fence_wait(struct vigs_fence *fence, bool interruptible);
+
+bool vigs_fence_signaled(struct vigs_fence *fence);
+
+/*
+ * Passing NULL won't hurt, this is for convenience.
+ */
+void vigs_fence_ref(struct vigs_fence *fence);
+
+/*
+ * Passing NULL won't hurt, this is for convenience.
+ */
+void vigs_fence_unref(struct vigs_fence *fence);
+
+/*
+ * IOCTLs
+ * @{
+ */
+
+int vigs_fence_create_ioctl(struct drm_device *drm_dev,
+                            void *data,
+                            struct drm_file *file_priv);
+
+int vigs_fence_wait_ioctl(struct drm_device *drm_dev,
+                          void *data,
+                          struct drm_file *file_priv);
+
+int vigs_fence_signaled_ioctl(struct drm_device *drm_dev,
+                              void *data,
+                              struct drm_file *file_priv);
+
+int vigs_fence_unref_ioctl(struct drm_device *drm_dev,
+                           void *data,
+                           struct drm_file *file_priv);
+
+/*
+ * @}
+ */
+
+#endif
diff --git a/drivers/gpu/drm/vigs/vigs_fenceman.c b/drivers/gpu/drm/vigs/vigs_fenceman.c
new file mode 100644 (file)
index 0000000..c551852
--- /dev/null
@@ -0,0 +1,65 @@
+#include "vigs_fenceman.h"
+#include "vigs_fence.h"
+
+/*
+ * Allocate and initialize the fence manager. 'seq' starts at UINT_MAX
+ * so the first fence gets sequence 1 (vigs_fence_seq_next() skips 0,
+ * the reserved "don't fence" value).
+ */
+int vigs_fenceman_create(struct vigs_fenceman **fenceman)
+{
+    int ret = 0;
+
+    DRM_DEBUG_DRIVER("enter\n");
+
+    *fenceman = kzalloc(sizeof(**fenceman), GFP_KERNEL);
+
+    if (!*fenceman) {
+        ret = -ENOMEM;
+        goto fail1;
+    }
+
+    spin_lock_init(&(*fenceman)->lock);
+    INIT_LIST_HEAD(&(*fenceman)->fence_list);
+    (*fenceman)->seq = UINT_MAX;
+
+    return 0;
+
+fail1:
+    *fenceman = NULL;
+
+    return ret;
+}
+
+/*
+ * Tear down the manager. All fences must already be gone -- a
+ * non-empty pending list here is a refcounting bug, hence BUG_ON.
+ */
+void vigs_fenceman_destroy(struct vigs_fenceman *fenceman)
+{
+    unsigned long flags;
+    bool fence_list_empty;
+
+    DRM_DEBUG_DRIVER("enter\n");
+
+    spin_lock_irqsave(&fenceman->lock, flags);
+    fence_list_empty = list_empty(&fenceman->fence_list);
+    spin_unlock_irqrestore(&fenceman->lock, flags);
+
+    BUG_ON(!fence_list_empty);
+
+    kfree(fenceman);
+}
+
+/*
+ * Signal (and unlink) every pending fence whose sequence number lies
+ * in [lower, upper], using wrap-safe comparisons. Safe from IRQ
+ * context: everything is done under the irqsave lock.
+ */
+void vigs_fenceman_ack(struct vigs_fenceman *fenceman,
+                       uint32_t lower, uint32_t upper)
+{
+    unsigned long flags;
+    struct vigs_fence *fence, *tmp;
+
+    spin_lock_irqsave(&fenceman->lock, flags);
+
+    list_for_each_entry_safe(fence, tmp, &fenceman->fence_list, list) {
+        if (vigs_fence_seq_num_after_eq(fence->seq, lower) &&
+            vigs_fence_seq_num_before_eq(fence->seq, upper)) {
+            DRM_DEBUG_DRIVER("Fence signaled (seq = %u)\n",
+                             fence->seq);
+            list_del_init(&fence->list);
+            fence->signaled = true;
+            wake_up_all(&fence->wait);
+        }
+    }
+
+    spin_unlock_irqrestore(&fenceman->lock, flags);
+}
diff --git a/drivers/gpu/drm/vigs/vigs_fenceman.h b/drivers/gpu/drm/vigs/vigs_fenceman.h
new file mode 100644 (file)
index 0000000..e6e1028
--- /dev/null
@@ -0,0 +1,47 @@
+#ifndef _VIGS_FENCEMAN_H_
+#define _VIGS_FENCEMAN_H_
+
+#include "drmP.h"
+
+/*
+ * This is fence manager for VIGS. It's responsible for the following:
+ * + Fence bookkeeping.
+ * + Fence sequence number management and IRQ processing.
+ */
+
+struct vigs_fenceman
+{
+    /*
+     * Lock that's used to guard all data inside
+     * fence manager and fence objects. Don't confuse it
+     * with struct ttm_bo_device::fence_lock, that lock
+     * is used to work with TTM sync objects, i.e. it's more
+     * "high level".
+     */
+    spinlock_t lock;
+
+    /*
+     * List of currently pending fences.
+     */
+    struct list_head fence_list;
+
+    /*
+     * Current sequence number, new fence should be
+     * assigned (seq + 1).
+     * Note! Sequence numbers are always non-0, 0 is
+     * a special value that tells GPU not to fence things.
+     */
+    uint32_t seq;
+};
+
+int vigs_fenceman_create(struct vigs_fenceman **fenceman);
+
+void vigs_fenceman_destroy(struct vigs_fenceman *fenceman);
+
+/*
+ * Can be called from IRQ handler.
+ */
+void vigs_fenceman_ack(struct vigs_fenceman *fenceman,
+                       uint32_t lower, uint32_t upper);
+
+#endif
diff --git a/drivers/gpu/drm/vigs/vigs_file.c b/drivers/gpu/drm/vigs/vigs_file.c
new file mode 100644 (file)
index 0000000..eef78de
--- /dev/null
@@ -0,0 +1,37 @@
+#include "vigs_file.h"
+#include "vigs_device.h"
+
+/*
+ * Create per-client state: a TTM object file used to hand fence
+ * handles to user space. The '10' is passed straight to
+ * ttm_object_file_init() -- presumably the hash table order;
+ * NOTE(review): confirm against the TTM object API.
+ */
+int vigs_file_create(struct vigs_device *vigs_dev,
+                     struct vigs_file **vigs_file)
+{
+    int ret = 0;
+
+    *vigs_file = kzalloc(sizeof(**vigs_file), GFP_KERNEL);
+
+    if (!*vigs_file) {
+        ret = -ENOMEM;
+        goto fail1;
+    }
+
+    (*vigs_file)->obj_file = ttm_object_file_init(vigs_dev->obj_dev, 10);
+
+    if (!(*vigs_file)->obj_file) {
+        ret = -ENOMEM;
+        goto fail2;
+    }
+
+    return 0;
+
+fail2:
+    kfree(*vigs_file);
+fail1:
+    *vigs_file = NULL;
+
+    return ret;
+}
+
+/* Release the TTM object file (drops all its refs) and free the state. */
+void vigs_file_destroy(struct vigs_file *vigs_file)
+{
+    ttm_object_file_release(&vigs_file->obj_file);
+    kfree(vigs_file);
+}
diff --git a/drivers/gpu/drm/vigs/vigs_file.h b/drivers/gpu/drm/vigs/vigs_file.h
new file mode 100644 (file)
index 0000000..45f16f8
--- /dev/null
@@ -0,0 +1,19 @@
+#ifndef _VIGS_FILE_H_
+#define _VIGS_FILE_H_
+
+#include "drmP.h"
+#include <ttm/ttm_object.h>
+
+struct vigs_device;
+
+struct vigs_file
+{
+    struct ttm_object_file *obj_file;
+};
+
+int vigs_file_create(struct vigs_device *vigs_dev,
+                     struct vigs_file **vigs_file);
+
+void vigs_file_destroy(struct vigs_file *vigs_file);
+
+#endif
index 57d0e32ad70d07781aade91a93bde4328ec6e77a..db632bd79ad9c3db8da562b24b72c5be0e0f39cc 100644 (file)
@@ -47,8 +47,6 @@ int vigs_gem_init(struct vigs_gem_object *vigs_gem,
         return -EINVAL;
     }
 
-    INIT_LIST_HEAD(&vigs_gem->list);
-
     memset(&placement, 0, sizeof(placement));
 
     placement.placement = placements;
@@ -246,6 +244,19 @@ int vigs_gem_in_vram(struct vigs_gem_object *vigs_gem)
     return vigs_gem->bo.mem.mem_type == TTM_PL_VRAM;
 }
 
+int vigs_gem_wait(struct vigs_gem_object *vigs_gem)
+{
+    int ret;
+
+    spin_lock(&vigs_gem->bo.bdev->fence_lock);
+
+    ret = ttm_bo_wait(&vigs_gem->bo, true, false, false);
+
+    spin_unlock(&vigs_gem->bo.bdev->fence_lock);
+
+    return ret;
+}
+
 void vigs_gem_free_object(struct drm_gem_object *gem)
 {
     struct vigs_gem_object *vigs_gem = gem_to_vigs_gem(gem);
@@ -333,6 +344,34 @@ int vigs_gem_map_ioctl(struct drm_device *drm_dev,
     return 0;
 }
 
+int vigs_gem_wait_ioctl(struct drm_device *drm_dev,
+                        void *data,
+                        struct drm_file *file_priv)
+{
+    struct drm_vigs_gem_wait *args = data;
+    struct drm_gem_object *gem;
+    struct vigs_gem_object *vigs_gem;
+    int ret;
+
+    gem = drm_gem_object_lookup(drm_dev, file_priv, args->handle);
+
+    if (gem == NULL) {
+        return -ENOENT;
+    }
+
+    vigs_gem = gem_to_vigs_gem(gem);
+
+    vigs_gem_reserve(vigs_gem);
+
+    ret = vigs_gem_wait(vigs_gem);
+
+    vigs_gem_unreserve(vigs_gem);
+
+    drm_gem_object_unreference_unlocked(gem);
+
+    return ret;
+}
+
 int vigs_gem_dumb_create(struct drm_file *file_priv,
                          struct drm_device *drm_dev,
                          struct drm_mode_create_dumb *args)
index 0b521bdc58c62f80a9ed2cce0cd2d68e82d8f286..ccfbff70ac7a92e4a1a511d7847f20298ce794e0 100644 (file)
@@ -24,12 +24,6 @@ struct vigs_gem_object
      */
     bool freed;
 
-    /*
-     * Use it only when this GEM is reserved. This makes it easier
-     * to reserve a set of GEMs and then unreserve them later.
-     */
-    struct list_head list;
-
     enum ttm_object_type type;
 
     /*
@@ -160,6 +154,8 @@ void vigs_gem_kunmap(struct vigs_gem_object *vigs_gem);
  */
 int vigs_gem_in_vram(struct vigs_gem_object *vigs_gem);
 
+int vigs_gem_wait(struct vigs_gem_object *vigs_gem);
+
 /*
  * @}
  */
@@ -192,6 +188,10 @@ int vigs_gem_map_ioctl(struct drm_device *drm_dev,
                        void *data,
                        struct drm_file *file_priv);
 
+int vigs_gem_wait_ioctl(struct drm_device *drm_dev,
+                        void *data,
+                        struct drm_file *file_priv);
+
 /*
  * @}
  */
index 1305fc0283540595296865b55082510b9a6ae898..6ac7fd388f8ae8de96dcebe1e5c6a308c53142eb 100644 (file)
@@ -1,6 +1,7 @@
 #include "vigs_irq.h"
 #include "vigs_device.h"
 #include "vigs_regs.h"
+#include "vigs_fenceman.h"
 
 static void vigs_finish_pageflips(struct vigs_device *vigs_dev)
 {
@@ -87,30 +88,46 @@ irqreturn_t vigs_irq_handler(DRM_IRQ_ARGS)
 {
     struct drm_device *drm_dev = (struct drm_device*)arg;
     struct vigs_device *vigs_dev = drm_dev->dev_private;
-    u32 value;
+    u32 int_value;
+    irqreturn_t ret = IRQ_NONE;
 
-    value = readl(vigs_dev->io_map->handle + VIGS_REG_INT);
+    int_value = readl(vigs_dev->io_map->handle + VIGS_REG_INT);
+
+    if ((int_value & (VIGS_REG_INT_VBLANK_PENDING | VIGS_REG_INT_FENCE_ACK_PENDING)) != 0) {
+        /*
+         * Clear the interrupt first in order
+         * not to stall the hardware.
+         */
+
+        writel(int_value, vigs_dev->io_map->handle + VIGS_REG_INT);
 
-    if ((value & VIGS_REG_INT_VBLANK_PENDING) == 0) {
-        return IRQ_NONE;
+        ret = IRQ_HANDLED;
     }
 
-    /*
-     * Clear the interrupt first in order
-     * not to stall the hardware.
-     */
+    if ((int_value & VIGS_REG_INT_FENCE_ACK_PENDING) != 0) {
+        u32 lower, upper;
 
-    value &= ~VIGS_REG_INT_VBLANK_PENDING;
+        while (1) {
+            spin_lock(&vigs_dev->irq_lock);
 
-    writel(value, vigs_dev->io_map->handle + VIGS_REG_INT);
+            lower = readl(vigs_dev->io_map->handle + VIGS_REG_FENCE_LOWER);
+            upper = readl(vigs_dev->io_map->handle + VIGS_REG_FENCE_UPPER);
 
-    /*
-     * Handle VBLANK.
-     */
+            spin_unlock(&vigs_dev->irq_lock);
 
-    drm_handle_vblank(drm_dev, 0);
+            if (lower) {
+                vigs_fenceman_ack(vigs_dev->fenceman, lower, upper);
+            } else {
+                break;
+            }
+        }
+    }
 
-    vigs_finish_pageflips(vigs_dev);
+    if ((int_value & VIGS_REG_INT_VBLANK_PENDING) != 0) {
+        drm_handle_vblank(drm_dev, 0);
+
+        vigs_finish_pageflips(vigs_dev);
+    }
 
-    return IRQ_HANDLED;
+    return ret;
 }
index faba664a75fef6aa9cb4d2af5e8c8c8aaf8d8573..02e4af3ca58a3f20b45337e90407db3d0d46ec14 100644 (file)
@@ -1,4 +1,5 @@
 #include "vigs_mman.h"
+#include "vigs_fence.h"
 #include <ttm/ttm_placement.h>
 
 /*
@@ -127,10 +128,10 @@ static struct ttm_backend_func vigs_ttm_backend_func = {
     .destroy = &vigs_ttm_backend_destroy,
 };
 
-struct ttm_tt *vigs_ttm_tt_create(struct ttm_bo_device *bo_dev,
-                                  unsigned long size,
-                                  uint32_t page_flags,
-                                  struct page *dummy_read_page)
+static struct ttm_tt *vigs_ttm_tt_create(struct ttm_bo_device *bo_dev,
+                                         unsigned long size,
+                                         uint32_t page_flags,
+                                         struct page *dummy_read_page)
 {
     struct ttm_dma_tt *dma_tt;
 
@@ -265,7 +266,39 @@ static int vigs_ttm_verify_access(struct ttm_buffer_object *bo,
     return 0;
 }
 
-int vigs_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+static bool vigs_ttm_sync_obj_signaled(void *sync_obj, void *sync_arg)
+{
+    return vigs_fence_signaled((struct vigs_fence*)sync_obj);
+}
+
+static int vigs_ttm_sync_obj_wait(void *sync_obj,
+                                  void *sync_arg,
+                                  bool lazy,
+                                  bool interruptible)
+{
+    return vigs_fence_wait((struct vigs_fence*)sync_obj, interruptible);
+}
+
+static int vigs_ttm_sync_obj_flush(void *sync_obj,
+                                   void *sync_arg)
+{
+    return 0;
+}
+
+static void vigs_ttm_sync_obj_unref(void **sync_obj)
+{
+    struct vigs_fence* fence = *sync_obj;
+    vigs_fence_unref(fence);
+    *sync_obj = NULL;
+}
+
+static void *vigs_ttm_sync_obj_ref(void *sync_obj)
+{
+    vigs_fence_ref((struct vigs_fence*)sync_obj);
+    return sync_obj;
+}
+
+static int vigs_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
     u32 placements[1];
     struct ttm_placement placement;
@@ -354,6 +387,11 @@ static struct ttm_bo_driver vigs_ttm_bo_driver =
     .evict_flags = &vigs_ttm_evict_flags,
     .move = &vigs_ttm_move,
     .verify_access = &vigs_ttm_verify_access,
+    .sync_obj_signaled = vigs_ttm_sync_obj_signaled,
+    .sync_obj_wait = vigs_ttm_sync_obj_wait,
+    .sync_obj_flush = vigs_ttm_sync_obj_flush,
+    .sync_obj_unref = vigs_ttm_sync_obj_unref,
+    .sync_obj_ref = vigs_ttm_sync_obj_ref,
     .fault_reserve_notify = &vigs_ttm_fault_reserve_notify,
     .io_mem_reserve = &vigs_ttm_io_mem_reserve,
     .io_mem_free = &vigs_ttm_io_mem_free,
index cd39cc9f6bf8bfda00e46a855116ba816f6a6a7a..a919247177b48081684c28e9e4b396d8c593f29f 100644 (file)
@@ -2,19 +2,13 @@
 #define _VIGS_PROTOCOL_H_
 
 /*
- * VIGS protocol is a multiple request-single response protocol.
- *
- * + Requests come batched.
- * + The response is written after the request batch.
- *
- * Not all commands can be batched, only commands that don't have response
- * data can be batched.
+ * VIGS protocol is a multiple request-no response protocol.
  */
 
 /*
  * Bump this whenever protocol changes.
  */
-#define VIGS_PROTOCOL_VERSION 14
+#define VIGS_PROTOCOL_VERSION 15
 
 typedef signed char vigsp_s8;
 typedef signed short vigsp_s16;
@@ -29,30 +23,36 @@ typedef vigsp_u32 vigsp_bool;
 typedef vigsp_u32 vigsp_surface_id;
 typedef vigsp_u32 vigsp_offset;
 typedef vigsp_u32 vigsp_color;
+typedef vigsp_u32 vigsp_fence_seq;
 
 typedef enum
 {
+    /*
+     * These commands are guaranteed to sync on host, i.e.
+     * no fence is required.
+     * @{
+     */
     vigsp_cmd_init = 0x0,
     vigsp_cmd_reset = 0x1,
     vigsp_cmd_exit = 0x2,
-    vigsp_cmd_create_surface = 0x3,
-    vigsp_cmd_destroy_surface = 0x4,
-    vigsp_cmd_set_root_surface = 0x5,
+    vigsp_cmd_set_root_surface = 0x3,
+    /*
+     * @}
+     */
+    /*
+     * These commands are executed asynchronously.
+     * @{
+     */
+    vigsp_cmd_create_surface = 0x4,
+    vigsp_cmd_destroy_surface = 0x5,
     vigsp_cmd_update_vram = 0x6,
     vigsp_cmd_update_gpu = 0x7,
     vigsp_cmd_copy = 0x8,
     vigsp_cmd_solid_fill = 0x9,
-} vigsp_cmd;
-
-typedef enum
-{
     /*
-     * Start from 0x1 to detect host failures on target.
+     * @}
      */
-    vigsp_status_success = 0x1,
-    vigsp_status_bad_call = 0x2,
-    vigsp_status_exec_error = 0x3,
-} vigsp_status;
+} vigsp_cmd;
 
 typedef enum
 {
@@ -89,7 +89,17 @@ struct vigsp_copy
 
 struct vigsp_cmd_batch_header
 {
-    vigsp_u32 num_requests;
+    /*
+     * Fence sequence requested by this batch.
+     * 0 for none.
+     */
+    vigsp_fence_seq fence_seq;
+
+    /*
+     * Batch size starting from batch header.
+     * Can be 0.
+     */
+    vigsp_u32 size;
 };
 
 struct vigsp_cmd_request_header
@@ -102,11 +112,6 @@ struct vigsp_cmd_request_header
     vigsp_u32 size;
 };
 
-struct vigsp_cmd_response_header
-{
-    vigsp_status status;
-};
-
 /*
  * cmd_init
  *
@@ -121,10 +126,6 @@ struct vigsp_cmd_response_header
 struct vigsp_cmd_init_request
 {
     vigsp_u32 client_version;
-};
-
-struct vigsp_cmd_init_response
-{
     vigsp_u32 server_version;
 };
 
index f3c08a62aefea5aeba6503138b74545012ee6f1a..0272f2bc21bec1ca6f4eca0d830fde6fe456b631 100644 (file)
@@ -3,8 +3,11 @@
 
 #define VIGS_REG_EXEC 0
 #define VIGS_REG_INT 8
+#define VIGS_REG_FENCE_LOWER 16
+#define VIGS_REG_FENCE_UPPER 24
 
 #define VIGS_REG_INT_VBLANK_ENABLE 1
 #define VIGS_REG_INT_VBLANK_PENDING 2
+#define VIGS_REG_INT_FENCE_ACK_PENDING 4
 
 #endif
index ad0e618fac2b879069e5887dea28ce60979ee7cf..33829a613171e2383f7573edb039305032d9c36f 100644 (file)
@@ -6,7 +6,7 @@
 /*
  * Version number.
  */
-#define YAGL_VERSION 21
+#define YAGL_VERSION 22
 
 /*
  * Device control codes magic.
index 179ea914606d7dcb284faa45bce8435d16844a1a..f93366f74df5436d2b25fd32e6677ba1fe1eb9dc 100644 (file)
@@ -8,7 +8,7 @@
 /*
  * Bump this whenever driver interface changes.
  */
-#define DRM_VIGS_DRIVER_VERSION 9
+#define DRM_VIGS_DRIVER_VERSION 10
 
 /*
  * Surface access flags.
@@ -46,6 +46,11 @@ struct drm_vigs_gem_map
     unsigned long address;
 };
 
+struct drm_vigs_gem_wait
+{
+    uint32_t handle;
+};
+
 struct drm_vigs_surface_info
 {
     uint32_t handle;
@@ -79,15 +84,43 @@ struct drm_vigs_surface_end_access
     int sync;
 };
 
+struct drm_vigs_create_fence
+{
+    int send;
+    uint32_t handle;
+    uint32_t seq;
+};
+
+struct drm_vigs_fence_wait
+{
+    uint32_t handle;
+};
+
+struct drm_vigs_fence_signaled
+{
+    uint32_t handle;
+    int signaled;
+};
+
+struct drm_vigs_fence_unref
+{
+    uint32_t handle;
+};
+
 #define DRM_VIGS_GET_PROTOCOL_VERSION 0x00
 #define DRM_VIGS_CREATE_SURFACE 0x01
 #define DRM_VIGS_CREATE_EXECBUFFER 0x02
 #define DRM_VIGS_GEM_MAP 0x03
-#define DRM_VIGS_SURFACE_INFO 0x04
-#define DRM_VIGS_EXEC 0x05
-#define DRM_VIGS_SURFACE_SET_GPU_DIRTY 0x06
-#define DRM_VIGS_SURFACE_START_ACCESS 0x07
-#define DRM_VIGS_SURFACE_END_ACCESS 0x08
+#define DRM_VIGS_GEM_WAIT 0x04
+#define DRM_VIGS_SURFACE_INFO 0x05
+#define DRM_VIGS_EXEC 0x06
+#define DRM_VIGS_SURFACE_SET_GPU_DIRTY 0x07
+#define DRM_VIGS_SURFACE_START_ACCESS 0x08
+#define DRM_VIGS_SURFACE_END_ACCESS 0x09
+#define DRM_VIGS_CREATE_FENCE 0x0A
+#define DRM_VIGS_FENCE_WAIT 0x0B
+#define DRM_VIGS_FENCE_SIGNALED 0x0C
+#define DRM_VIGS_FENCE_UNREF 0x0D
 
 #define DRM_IOCTL_VIGS_GET_PROTOCOL_VERSION DRM_IOR(DRM_COMMAND_BASE + \
             DRM_VIGS_GET_PROTOCOL_VERSION, struct drm_vigs_get_protocol_version)
@@ -97,6 +130,8 @@ struct drm_vigs_surface_end_access
             DRM_VIGS_CREATE_EXECBUFFER, struct drm_vigs_create_execbuffer)
 #define DRM_IOCTL_VIGS_GEM_MAP DRM_IOWR(DRM_COMMAND_BASE + \
             DRM_VIGS_GEM_MAP, struct drm_vigs_gem_map)
+#define DRM_IOCTL_VIGS_GEM_WAIT DRM_IOW(DRM_COMMAND_BASE + \
+            DRM_VIGS_GEM_WAIT, struct drm_vigs_gem_wait)
 #define DRM_IOCTL_VIGS_SURFACE_INFO DRM_IOWR(DRM_COMMAND_BASE + \
             DRM_VIGS_SURFACE_INFO, struct drm_vigs_surface_info)
 #define DRM_IOCTL_VIGS_EXEC DRM_IOW(DRM_COMMAND_BASE + \
@@ -107,5 +142,13 @@ struct drm_vigs_surface_end_access
             DRM_VIGS_SURFACE_START_ACCESS, struct drm_vigs_surface_start_access)
 #define DRM_IOCTL_VIGS_SURFACE_END_ACCESS DRM_IOW(DRM_COMMAND_BASE + \
             DRM_VIGS_SURFACE_END_ACCESS, struct drm_vigs_surface_end_access)
+#define DRM_IOCTL_VIGS_CREATE_FENCE DRM_IOWR(DRM_COMMAND_BASE + \
+            DRM_VIGS_CREATE_FENCE, struct drm_vigs_create_fence)
+#define DRM_IOCTL_VIGS_FENCE_WAIT DRM_IOW(DRM_COMMAND_BASE + \
+            DRM_VIGS_FENCE_WAIT, struct drm_vigs_fence_wait)
+#define DRM_IOCTL_VIGS_FENCE_SIGNALED DRM_IOWR(DRM_COMMAND_BASE + \
+            DRM_VIGS_FENCE_SIGNALED, struct drm_vigs_fence_signaled)
+#define DRM_IOCTL_VIGS_FENCE_UNREF DRM_IOW(DRM_COMMAND_BASE + \
+            DRM_VIGS_FENCE_UNREF, struct drm_vigs_fence_unref)
 
 #endif