vigs_framebuffer.o \
vigs_comm.o \
vigs_fbdev.o \
- vigs_irq.o
+ vigs_irq.o \
+ vigs_fence.o \
+ vigs_fenceman.o \
+ vigs_file.o
obj-$(CONFIG_DRM_VIGS) += vigs_drm.o
#include "vigs_device.h"
#include "vigs_execbuffer.h"
#include "vigs_regs.h"
+#include "vigs_fence.h"
#include <drm/vigs_drm.h>
static int vigs_comm_alloc(struct vigs_comm *comm,
*ptr = comm->execbuffer->gem.kptr;
- memset(*ptr, 0, vigs_gem_size(&comm->execbuffer->gem));
-
return 0;
}
static int vigs_comm_prepare(struct vigs_comm *comm,
vigsp_cmd cmd,
unsigned long request_size,
- unsigned long response_size,
- void **request,
- void **response)
+ void **request)
{
int ret;
void *ptr;
ret = vigs_comm_alloc(comm,
sizeof(*batch_header) +
sizeof(*request_header) +
- request_size +
- sizeof(struct vigsp_cmd_response_header) +
- response_size,
+ request_size,
&ptr);
if (ret != 0) {
batch_header = ptr;
request_header = (struct vigsp_cmd_request_header*)(batch_header + 1);
- batch_header->num_requests = 1;
+ batch_header->fence_seq = 0;
+ batch_header->size = sizeof(*request_header) + request_size;
request_header->cmd = cmd;
request_header->size = request_size;
*request = (request_header + 1);
}
- if (response) {
- *response = (void*)(request_header + 1) +
- request_size +
- sizeof(struct vigsp_cmd_response_header);
- }
-
return 0;
}
-static void vigs_comm_exec_locked(struct vigs_comm *comm,
- struct vigs_execbuffer *execbuffer)
+static void vigs_comm_exec_internal(struct vigs_comm *comm,
+ struct vigs_execbuffer *execbuffer)
{
writel(vigs_gem_offset(&execbuffer->gem), comm->io_ptr + VIGS_REG_EXEC);
}
-static int vigs_comm_exec_internal(struct vigs_comm *comm)
-{
- struct vigsp_cmd_batch_header *batch_header = comm->execbuffer->gem.kptr;
- struct vigsp_cmd_request_header *request_header =
- (struct vigsp_cmd_request_header*)(batch_header + 1);
- struct vigsp_cmd_response_header *response_header;
- vigsp_u32 i;
-
- for (i = 0; i < batch_header->num_requests; ++i) {
- request_header =
- (struct vigsp_cmd_request_header*)((uint8_t*)(request_header + 1) +
- request_header->size);
- }
-
- response_header = (struct vigsp_cmd_response_header*)request_header;
-
- vigs_comm_exec_locked(comm, comm->execbuffer);
-
- switch (response_header->status) {
- case vigsp_status_success:
- return 0;
- case vigsp_status_bad_call:
- DRM_ERROR("bad host call\n");
- return -EINVAL;
- case vigsp_status_exec_error:
- DRM_ERROR("host exec error\n");
- return -EIO;
- default:
- DRM_ERROR("fatal host error\n");
- return -ENXIO;
- }
-}
-
static int vigs_comm_init(struct vigs_comm *comm)
{
int ret;
struct vigsp_cmd_init_request *request;
- struct vigsp_cmd_init_response *response;
ret = vigs_comm_prepare(comm,
vigsp_cmd_init,
sizeof(*request),
- sizeof(*response),
- (void**)&request,
- (void**)&response);
+ (void**)&request);
if (ret != 0) {
return ret;
}
request->client_version = VIGS_PROTOCOL_VERSION;
+ request->server_version = 0;
- ret = vigs_comm_exec_internal(comm);
+ vigs_comm_exec_internal(comm, comm->execbuffer);
- if (ret != 0) {
- return ret;
- }
-
- if (response->server_version != VIGS_PROTOCOL_VERSION) {
+ if (request->server_version != VIGS_PROTOCOL_VERSION) {
DRM_ERROR("protocol version mismatch, expected %u, actual %u\n",
VIGS_PROTOCOL_VERSION,
- response->server_version);
+ request->server_version);
return -ENODEV;
}
{
int ret;
- ret = vigs_comm_prepare(comm, vigsp_cmd_exit, 0, 0, NULL, NULL);
+ ret = vigs_comm_prepare(comm, vigsp_cmd_exit, 0, NULL);
if (ret != 0) {
return;
}
- vigs_comm_exec_internal(comm);
+ vigs_comm_exec_internal(comm, comm->execbuffer);
}
int vigs_comm_create(struct vigs_device *vigs_dev,
void vigs_comm_exec(struct vigs_comm *comm,
struct vigs_execbuffer *execbuffer)
{
- mutex_lock(&comm->mutex);
- vigs_comm_exec_locked(comm, execbuffer);
- mutex_unlock(&comm->mutex);
+ vigs_comm_exec_internal(comm, execbuffer);
}
int vigs_comm_reset(struct vigs_comm *comm)
mutex_lock(&comm->mutex);
- ret = vigs_comm_prepare(comm, vigsp_cmd_reset, 0, 0, NULL, NULL);
+ ret = vigs_comm_prepare(comm, vigsp_cmd_reset, 0, NULL);
if (ret == 0) {
- ret = vigs_comm_exec_internal(comm);
+ vigs_comm_exec_internal(comm, comm->execbuffer);
}
mutex_unlock(&comm->mutex);
ret = vigs_comm_prepare(comm,
vigsp_cmd_create_surface,
sizeof(*request),
- 0,
- (void**)&request,
- NULL);
+ (void**)&request);
if (ret == 0) {
request->width = width;
request->format = format;
request->id = id;
- ret = vigs_comm_exec_internal(comm);
+ vigs_comm_exec_internal(comm, comm->execbuffer);
}
mutex_unlock(&comm->mutex);
ret = vigs_comm_prepare(comm,
vigsp_cmd_destroy_surface,
sizeof(*request),
- 0,
- (void**)&request,
- NULL);
+ (void**)&request);
if (ret == 0) {
request->id = id;
- ret = vigs_comm_exec_internal(comm);
+ vigs_comm_exec_internal(comm, comm->execbuffer);
}
mutex_unlock(&comm->mutex);
vigsp_offset offset)
{
int ret;
+ struct vigs_fence *fence;
struct vigsp_cmd_set_root_surface_request *request;
DRM_DEBUG_DRIVER("id = %u, offset = %u\n", id, offset);
+ ret = vigs_fence_create(comm->vigs_dev->fenceman, &fence);
+
+ if (ret != 0) {
+ return ret;
+ }
+
mutex_lock(&comm->mutex);
ret = vigs_comm_prepare(comm,
vigsp_cmd_set_root_surface,
sizeof(*request),
- 0,
- (void**)&request,
- NULL);
+ (void**)&request);
if (ret == 0) {
request->id = id;
request->offset = offset;
- ret = vigs_comm_exec_internal(comm);
+ vigs_execbuffer_fence(comm->execbuffer, fence);
+
+ vigs_comm_exec_internal(comm, comm->execbuffer);
}
mutex_unlock(&comm->mutex);
+ if (ret == 0) {
+ vigs_fence_wait(fence, false);
+ }
+
+ vigs_fence_unref(fence);
+
return ret;
}
vigsp_offset offset)
{
int ret;
+ struct vigs_fence *fence;
struct vigsp_cmd_update_vram_request *request;
DRM_DEBUG_DRIVER("id = %u, offset = %u\n", id, offset);
+ ret = vigs_fence_create(comm->vigs_dev->fenceman, &fence);
+
+ if (ret != 0) {
+ return ret;
+ }
+
mutex_lock(&comm->mutex);
ret = vigs_comm_prepare(comm,
vigsp_cmd_update_vram,
sizeof(*request),
- 0,
- (void**)&request,
- NULL);
+ (void**)&request);
if (ret == 0) {
request->sfc_id = id;
request->offset = offset;
- ret = vigs_comm_exec_internal(comm);
+ vigs_execbuffer_fence(comm->execbuffer, fence);
+
+ vigs_comm_exec_internal(comm, comm->execbuffer);
}
mutex_unlock(&comm->mutex);
+ if (ret == 0) {
+ vigs_fence_wait(fence, false);
+ }
+
+ vigs_fence_unref(fence);
+
return ret;
}
vigsp_offset offset)
{
int ret;
+ struct vigs_fence *fence;
struct vigsp_cmd_update_gpu_request *request;
DRM_DEBUG_DRIVER("id = %u, offset = %u\n", id, offset);
+ ret = vigs_fence_create(comm->vigs_dev->fenceman, &fence);
+
+ if (ret != 0) {
+ return ret;
+ }
+
mutex_lock(&comm->mutex);
ret = vigs_comm_prepare(comm,
vigsp_cmd_update_gpu,
sizeof(*request) + sizeof(struct vigsp_rect),
- 0,
- (void**)&request,
- NULL);
+ (void**)&request);
if (ret == 0) {
request->sfc_id = id;
request->entries[0].size.w = width;
request->entries[0].size.h = height;
- ret = vigs_comm_exec_internal(comm);
+ vigs_execbuffer_fence(comm->execbuffer, fence);
+
+ vigs_comm_exec_internal(comm, comm->execbuffer);
}
mutex_unlock(&comm->mutex);
+ if (ret == 0) {
+ vigs_fence_wait(fence, false);
+ }
+
+ vigs_fence_unref(fence);
+
return ret;
}
+int vigs_comm_fence(struct vigs_comm *comm, struct vigs_fence *fence)
+{
+ struct vigsp_cmd_batch_header *batch_header;
+ int ret;
+
+ DRM_DEBUG_DRIVER("seq = %u\n", fence->seq);
+
+ mutex_lock(&comm->mutex);
+
+ ret = vigs_comm_alloc(comm,
+ sizeof(*batch_header),
+ (void**)&batch_header);
+
+ if (ret != 0) {
+ mutex_unlock(&comm->mutex);
+
+ return ret;
+ }
+
+ batch_header->fence_seq = 0;
+ batch_header->size = 0;
+
+ vigs_execbuffer_fence(comm->execbuffer, fence);
+
+ vigs_comm_exec_internal(comm, comm->execbuffer);
+
+ mutex_unlock(&comm->mutex);
+
+ return 0;
+}
+
int vigs_comm_get_protocol_version_ioctl(struct drm_device *drm_dev,
void *data,
struct drm_file *file_priv)
struct drm_file;
struct vigs_device;
struct vigs_execbuffer;
+struct vigs_fence;
struct vigs_comm
{
u32 height,
vigsp_offset offset);
+int vigs_comm_fence(struct vigs_comm *comm, struct vigs_fence *fence);
+
/*
* IOCTLs
* @{
if (ret != 0) {
vigs_framebuffer_unpin(vigs_fb);
+
return ret;
}
#include "vigs_device.h"
#include "vigs_mman.h"
+#include "vigs_fenceman.h"
#include "vigs_crtc.h"
#include "vigs_output.h"
#include "vigs_framebuffer.h"
.cleanup_vma = &vigs_device_mman_cleanup_vma
};
-static struct vigs_surface
- *vigs_device_reference_surface_unlocked(struct vigs_device *vigs_dev,
- vigsp_surface_id sfc_id)
-{
- struct vigs_surface *sfc;
-
- mutex_lock(&vigs_dev->drm_dev->struct_mutex);
-
- mutex_lock(&vigs_dev->surface_idr_mutex);
-
- sfc = idr_find(&vigs_dev->surface_idr, sfc_id);
-
- if (sfc) {
- if (vigs_gem_freed(&sfc->gem)) {
- sfc = NULL;
- } else {
- drm_gem_object_reference(&sfc->gem.base);
- }
- }
-
- mutex_unlock(&vigs_dev->surface_idr_mutex);
-
- mutex_unlock(&vigs_dev->drm_dev->struct_mutex);
-
- return sfc;
-}
-
-static bool vigs_gem_is_reserved(struct list_head *gem_list,
- struct vigs_gem_object *gem)
-{
- struct vigs_gem_object *tmp;
-
- list_for_each_entry(tmp, gem_list, list) {
- if (tmp == gem) {
- return true;
- }
- }
-
- return false;
-}
-
-static struct vigs_surface
- *vigs_surface_reserve(struct vigs_device *vigs_dev,
- struct list_head *gem_list,
- vigsp_surface_id sfc_id)
-{
- struct vigs_surface *sfc =
- vigs_device_reference_surface_unlocked(vigs_dev, sfc_id);
-
- if (!sfc) {
- DRM_ERROR("Surface %u not found\n", sfc_id);
- return NULL;
- }
-
- if (vigs_gem_is_reserved(gem_list, &sfc->gem)) {
- drm_gem_object_unreference_unlocked(&sfc->gem.base);
- } else {
- vigs_gem_reserve(&sfc->gem);
- list_add_tail(&sfc->gem.list, gem_list);
- }
-
- return sfc;
-}
-
-/*
- * 'gem_list' will hold a list of GEMs that should be
- * unreserved and unreferenced after execution.
- */
-static int vigs_device_patch_commands(struct vigs_device *vigs_dev,
- void *data,
- u32 data_size,
- struct list_head* gem_list)
-{
- struct vigsp_cmd_batch_header *batch_header = data;
- struct vigsp_cmd_request_header *request_header =
- (struct vigsp_cmd_request_header*)(batch_header + 1);
- union
- {
- struct vigsp_cmd_update_vram_request *update_vram;
- struct vigsp_cmd_update_gpu_request *update_gpu;
- struct vigsp_cmd_copy_request *copy;
- struct vigsp_cmd_solid_fill_request *solid_fill;
- void *data;
- } request;
- vigsp_u32 i;
- struct vigs_surface *sfc;
- int ret = 0;
-
- /*
- * GEM is always at least PAGE_SIZE long, so don't check
- * if batch header is out of bounds.
- */
-
- for (i = 0; i < batch_header->num_requests; ++i) {
- if (((void*)(request_header) + sizeof(*request_header)) >
- (data + data_size)) {
- DRM_ERROR("request header outside of GEM\n");
- ret = -EINVAL;
- break;
- }
-
- if (((void*)(request_header + 1) + request_header->size) >
- (data + data_size)) {
- DRM_ERROR("request data outside of GEM\n");
- ret = -EINVAL;
- break;
- }
-
- request.data = (request_header + 1);
-
- switch (request_header->cmd) {
- case vigsp_cmd_update_vram:
- sfc = vigs_surface_reserve(vigs_dev,
- gem_list,
- request.update_vram->sfc_id);
- if (!sfc) {
- ret = -EINVAL;
- break;
- }
- if (vigs_gem_in_vram(&sfc->gem)) {
- if (vigs_surface_need_vram_update(sfc)) {
- request.update_vram->offset = vigs_gem_offset(&sfc->gem);
- sfc->is_gpu_dirty = false;
- } else {
- DRM_DEBUG_DRIVER("Surface %u doesn't need to be updated, ignoring update_vram\n",
- request.update_vram->sfc_id);
- request.update_vram->sfc_id = 0;
- }
- } else {
- DRM_DEBUG_DRIVER("Surface %u not in VRAM, ignoring update_vram\n",
- request.update_vram->sfc_id);
- request.update_vram->sfc_id = 0;
- }
- break;
- case vigsp_cmd_update_gpu:
- sfc = vigs_surface_reserve(vigs_dev,
- gem_list,
- request.update_gpu->sfc_id);
- if (!sfc) {
- ret = -EINVAL;
- break;
- }
- if (vigs_gem_in_vram(&sfc->gem)) {
- if (vigs_surface_need_gpu_update(sfc)) {
- request.update_gpu->offset = vigs_gem_offset(&sfc->gem);
- sfc->is_gpu_dirty = false;
- } else {
- DRM_DEBUG_DRIVER("Surface %u doesn't need to be updated, ignoring update_gpu\n",
- request.update_gpu->sfc_id);
- request.update_gpu->sfc_id = 0;
- }
- } else {
- DRM_DEBUG_DRIVER("Surface %u not in VRAM, ignoring update_gpu\n",
- request.update_gpu->sfc_id);
- request.update_gpu->sfc_id = 0;
- }
- break;
- case vigsp_cmd_copy:
- sfc = vigs_surface_reserve(vigs_dev,
- gem_list,
- request.copy->dst_id);
- if (!sfc) {
- ret = -EINVAL;
- break;
- }
- if (vigs_gem_in_vram(&sfc->gem)) {
- sfc->is_gpu_dirty = true;
- }
- break;
- case vigsp_cmd_solid_fill:
- sfc = vigs_surface_reserve(vigs_dev,
- gem_list,
- request.solid_fill->sfc_id);
- if (!sfc) {
- ret = -EINVAL;
- break;
- }
- if (vigs_gem_in_vram(&sfc->gem)) {
- sfc->is_gpu_dirty = true;
- }
- break;
- default:
- break;
- }
-
- request_header =
- (struct vigsp_cmd_request_header*)(request.data +
- request_header->size);
- }
-
- return 0;
-}
-
-static void vigs_device_finish_patch_commands(struct list_head* gem_list)
-{
- struct vigs_gem_object *gem, *gem_tmp;
-
- list_for_each_entry_safe(gem, gem_tmp, gem_list, list) {
- list_del(&gem->list);
- vigs_gem_unreserve(gem);
- drm_gem_object_unreference_unlocked(&gem->base);
- }
-}
-
int vigs_device_init(struct vigs_device *vigs_dev,
struct drm_device *drm_dev,
struct pci_dev *pci_dev,
goto fail2;
}
+ vigs_dev->obj_dev = ttm_object_device_init(vigs_dev->mman->mem_global_ref.object,
+ 12);
+
+ if (!vigs_dev->obj_dev) {
+ DRM_ERROR("Unable to initialize obj_dev\n");
+ ret = -ENOMEM;
+ goto fail3;
+ }
+
+ ret = vigs_fenceman_create(&vigs_dev->fenceman);
+
+ if (ret != 0) {
+ goto fail4;
+ }
+
ret = vigs_comm_create(vigs_dev, &vigs_dev->comm);
if (ret != 0) {
- goto fail3;
+ goto fail5;
}
+ spin_lock_init(&vigs_dev->irq_lock);
+
drm_mode_config_init(vigs_dev->drm_dev);
vigs_framebuffer_config_init(vigs_dev);
ret = vigs_crtc_init(vigs_dev);
if (ret != 0) {
- goto fail4;
+ goto fail6;
}
ret = vigs_output_init(vigs_dev);
if (ret != 0) {
- goto fail4;
+ goto fail6;
}
ret = drm_vblank_init(drm_dev, 1);
if (ret != 0) {
- goto fail4;
+ goto fail6;
}
/*
ret = drm_irq_install(drm_dev);
if (ret != 0) {
- goto fail5;
+ goto fail7;
}
ret = vigs_fbdev_create(vigs_dev, &vigs_dev->fbdev);
if (ret != 0) {
- goto fail6;
+ goto fail8;
}
return 0;
-fail6:
+fail8:
drm_irq_uninstall(drm_dev);
-fail5:
+fail7:
drm_vblank_cleanup(drm_dev);
-fail4:
+fail6:
drm_mode_config_cleanup(vigs_dev->drm_dev);
vigs_comm_destroy(vigs_dev->comm);
+fail5:
+ vigs_fenceman_destroy(vigs_dev->fenceman);
+fail4:
+ ttm_object_device_release(&vigs_dev->obj_dev);
fail3:
vigs_mman_destroy(vigs_dev->mman);
fail2:
drm_vblank_cleanup(vigs_dev->drm_dev);
drm_mode_config_cleanup(vigs_dev->drm_dev);
vigs_comm_destroy(vigs_dev->comm);
+ vigs_fenceman_destroy(vigs_dev->fenceman);
+ ttm_object_device_release(&vigs_dev->obj_dev);
vigs_mman_destroy(vigs_dev->mman);
drm_rmmap(vigs_dev->drm_dev, vigs_dev->io_map);
idr_destroy(&vigs_dev->surface_idr);
mutex_unlock(&vigs_dev->surface_idr_mutex);
}
+struct vigs_surface
+ *vigs_device_reference_surface(struct vigs_device *vigs_dev,
+ vigsp_surface_id sfc_id)
+{
+ struct vigs_surface *sfc;
+
+ mutex_lock(&vigs_dev->surface_idr_mutex);
+
+ sfc = idr_find(&vigs_dev->surface_idr, sfc_id);
+
+ if (sfc) {
+ if (vigs_gem_freed(&sfc->gem)) {
+ sfc = NULL;
+ } else {
+ drm_gem_object_reference(&sfc->gem.base);
+ }
+ }
+
+ mutex_unlock(&vigs_dev->surface_idr_mutex);
+
+ return sfc;
+}
+
int vigs_device_add_surface_unlocked(struct vigs_device *vigs_dev,
struct vigs_surface *sfc,
vigsp_surface_id* id)
vigs_device_remove_surface(vigs_dev, sfc_id);
mutex_unlock(&vigs_dev->drm_dev->struct_mutex);
}
-
-int vigs_device_exec_ioctl(struct drm_device *drm_dev,
- void *data,
- struct drm_file *file_priv)
-{
- struct vigs_device *vigs_dev = drm_dev->dev_private;
- struct drm_vigs_exec *args = data;
- struct drm_gem_object *gem;
- struct vigs_gem_object *vigs_gem;
- struct vigs_execbuffer *execbuffer;
- struct list_head gem_list;
- int ret;
-
- INIT_LIST_HEAD(&gem_list);
-
- gem = drm_gem_object_lookup(drm_dev, file_priv, args->handle);
-
- if (gem == NULL) {
- return -ENOENT;
- }
-
- vigs_gem = gem_to_vigs_gem(gem);
-
- if (vigs_gem->type != VIGS_GEM_TYPE_EXECBUFFER) {
- drm_gem_object_unreference_unlocked(gem);
- return -ENOENT;
- }
-
- execbuffer = vigs_gem_to_vigs_execbuffer(vigs_gem);
-
- vigs_gem_reserve(vigs_gem);
-
- /*
- * Never unmap for optimization, but we got to be careful,
- * worst case scenario is when whole RAM BAR is mapped into kernel.
- */
- ret = vigs_gem_kmap(vigs_gem);
-
- if (ret != 0) {
- vigs_gem_unreserve(vigs_gem);
- drm_gem_object_unreference_unlocked(gem);
- return ret;
- }
-
- vigs_gem_unreserve(vigs_gem);
-
- ret = vigs_device_patch_commands(vigs_dev,
- execbuffer->gem.kptr,
- vigs_gem_size(&execbuffer->gem),
- &gem_list);
-
- if (ret != 0) {
- vigs_device_finish_patch_commands(&gem_list);
- drm_gem_object_unreference_unlocked(gem);
- return ret;
- }
-
- vigs_comm_exec(vigs_dev->comm, execbuffer);
-
- vigs_device_finish_patch_commands(&gem_list);
- drm_gem_object_unreference_unlocked(gem);
-
- return 0;
-}
#include "vigs_protocol.h"
struct vigs_mman;
+struct vigs_fenceman;
struct vigs_comm;
struct vigs_fbdev;
struct vigs_surface;
struct vigs_mman *mman;
+ struct ttm_object_device *obj_dev;
+
+ struct vigs_fenceman *fenceman;
+
struct vigs_comm *comm;
struct vigs_fbdev *fbdev;
+ /*
+ * We need this because the 'lower' and 'upper' fence acks must be read
+ * atomically in the IRQ handler, and on SMP systems the IRQ handler
+ * can run on several CPUs concurrently.
+ */
+ spinlock_t irq_lock;
+
/*
* A hack we're forced to have in order to tell if we
* need to track GEM access or not in 'vigs_device_mmap'.
void vigs_device_remove_surface(struct vigs_device *vigs_dev,
vigsp_surface_id sfc_id);
+struct vigs_surface
+ *vigs_device_reference_surface(struct vigs_device *vigs_dev,
+ vigsp_surface_id sfc_id);
+
/*
* Locks drm_device::struct_mutex.
* @{
* @}
*/
-/*
- * IOCTLs
- * @{
- */
-
-int vigs_device_exec_ioctl(struct drm_device *drm_dev,
- void *data,
- struct drm_file *file_priv);
-
-/*
- * @}
- */
-
#endif
#include "vigs_surface.h"
#include "vigs_execbuffer.h"
#include "vigs_irq.h"
+#include "vigs_fence.h"
+#include "vigs_file.h"
+#include "vigs_mman.h"
#include "drmP.h"
#include "drm.h"
#include <linux/module.h>
DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(VIGS_GEM_MAP, vigs_gem_map_ioctl,
DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIGS_GEM_WAIT, vigs_gem_wait_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(VIGS_SURFACE_INFO, vigs_surface_info_ioctl,
DRM_UNLOCKED | DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIGS_EXEC, vigs_device_exec_ioctl,
+ DRM_IOCTL_DEF_DRV(VIGS_EXEC, vigs_execbuffer_exec_ioctl,
DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(VIGS_SURFACE_SET_GPU_DIRTY, vigs_surface_set_gpu_dirty_ioctl,
DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(VIGS_SURFACE_START_ACCESS, vigs_surface_start_access_ioctl,
DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(VIGS_SURFACE_END_ACCESS, vigs_surface_end_access_ioctl,
- DRM_UNLOCKED | DRM_AUTH)
+ DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIGS_CREATE_FENCE, vigs_fence_create_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIGS_FENCE_WAIT, vigs_fence_wait_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIGS_FENCE_SIGNALED, vigs_fence_signaled_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIGS_FENCE_UNREF, vigs_fence_unref_ioctl,
+ DRM_UNLOCKED | DRM_AUTH)
};
static const struct file_operations vigs_drm_driver_fops =
return 0;
}
+static int vigs_drm_open(struct drm_device *dev, struct drm_file *file_priv)
+{
+ int ret = 0;
+ struct vigs_device *vigs_dev = dev->dev_private;
+ struct vigs_file *vigs_file;
+
+ DRM_DEBUG_DRIVER("enter\n");
+
+ ret = vigs_file_create(vigs_dev, &vigs_file);
+
+ if (ret != 0) {
+ return ret;
+ }
+
+ file_priv->driver_priv = vigs_file;
+
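+ /*
+ * TTM uses bo_dev.dev_mapping to unmap user space mappings when
+ * buffers are moved or destroyed; borrow the address_space from the
+ * first opener here, as other TTM-based drivers do.
+ */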
+ if (unlikely(vigs_dev->mman->bo_dev.dev_mapping == NULL)) {
+ vigs_dev->mman->bo_dev.dev_mapping =
+ file_priv->filp->f_path.dentry->d_inode->i_mapping;
+ }
+
+ return 0;
+}
static void vigs_drm_preclose(struct drm_device *dev,
struct drm_file *file_priv)
static void vigs_drm_postclose(struct drm_device *dev,
struct drm_file *file_priv)
{
+ struct vigs_file *vigs_file = file_priv->driver_priv;
+
DRM_DEBUG_DRIVER("enter\n");
+
+ vigs_file_destroy(vigs_file);
+
+ file_priv->driver_priv = NULL;
}
static void vigs_drm_lastclose(struct drm_device *dev)
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
.load = vigs_drm_load,
.unload = vigs_drm_unload,
+ .open = vigs_drm_open,
.preclose = vigs_drm_preclose,
.postclose = vigs_drm_postclose,
.lastclose = vigs_drm_lastclose,
#include "vigs_execbuffer.h"
+#include "vigs_device.h"
+#include "vigs_surface.h"
+#include "vigs_comm.h"
+#include "vigs_fence.h"
#include <drm/vigs_drm.h>
+union vigs_request
+{
+ struct vigsp_cmd_update_vram_request *update_vram;
+ struct vigsp_cmd_update_gpu_request *update_gpu;
+ struct vigsp_cmd_copy_request *copy;
+ struct vigsp_cmd_solid_fill_request *solid_fill;
+ void *data;
+};
+
+static int vigs_execbuffer_validate_buffer(struct vigs_device *vigs_dev,
+ struct vigs_validate_buffer *buffer,
+ struct list_head* list,
+ vigsp_surface_id sfc_id,
+ vigsp_cmd cmd,
+ int which,
+ void *data)
+{
+ struct vigs_surface *sfc = vigs_device_reference_surface(vigs_dev, sfc_id);
+ struct vigs_validate_buffer *tmp;
+
+ if (!sfc) {
+ DRM_ERROR("Surface %u not found\n", sfc_id);
+ return -EINVAL;
+ }
+
+ buffer->base.new_sync_obj_arg = NULL;
+ buffer->base.bo = &sfc->gem.bo;
+ buffer->cmd = cmd;
+ buffer->which = which;
+ buffer->data = data;
+
+ list_for_each_entry(tmp, list, base.head) {
+ if (tmp->base.bo == buffer->base.bo) {
+ /*
+ * Already on the list, we're done.
+ */
+ return 0;
+ }
+ }
+
+ list_add_tail(&buffer->base.head, list);
+
+ return 0;
+}
+
+static void vigs_execbuffer_clear_validation(struct vigs_validate_buffer *buffer)
+{
+ struct vigs_gem_object *gem = bo_to_vigs_gem(buffer->base.bo);
+
+ drm_gem_object_unreference(&gem->base);
+}
+
static void vigs_execbuffer_destroy(struct vigs_gem_object *gem)
{
}
return ret;
}
+int vigs_execbuffer_validate_buffers(struct vigs_execbuffer *execbuffer,
+ struct list_head* list,
+ struct vigs_validate_buffer **buffers,
+ int *num_buffers,
+ bool *sync)
+{
+ struct vigs_device *vigs_dev = execbuffer->gem.base.dev->dev_private;
+ void *data = execbuffer->gem.kptr;
+ u32 data_size = vigs_gem_size(&execbuffer->gem);
+ struct vigsp_cmd_batch_header *batch_header = data;
+ struct vigsp_cmd_request_header *request_header =
+ (struct vigsp_cmd_request_header*)(batch_header + 1);
+ union vigs_request request;
+ int num_commands = 0, ret = 0;
+
+ *num_buffers = 0;
+ *sync = false;
+
+ /*
+ * A GEM is always at least PAGE_SIZE long, so there's no need to check
+ * whether the batch header itself is out of bounds.
+ */
+
+ while ((void*)request_header <
+ ((void*)(batch_header + 1) + batch_header->size)) {
+ if (((void*)(request_header) + sizeof(*request_header)) >
+ (data + data_size)) {
+ DRM_ERROR("request header outside of GEM\n");
+ ret = -EINVAL;
+ goto fail1;
+ }
+
+ if (((void*)(request_header + 1) + request_header->size) >
+ (data + data_size)) {
+ DRM_ERROR("request data outside of GEM\n");
+ ret = -EINVAL;
+ goto fail1;
+ }
+
+ request.data = (request_header + 1);
+
+ switch (request_header->cmd) {
+ case vigsp_cmd_update_vram:
+ case vigsp_cmd_update_gpu:
+ *sync = true;
+ *num_buffers += 1;
+ break;
+ case vigsp_cmd_copy:
+ *num_buffers += 2;
+ break;
+ case vigsp_cmd_solid_fill:
+ *num_buffers += 1;
+ break;
+ default:
+ break;
+ }
+
+ request_header =
+ (struct vigsp_cmd_request_header*)(request.data +
+ request_header->size);
+
+ ++num_commands;
+ }
+
+ *buffers = kmalloc(*num_buffers * sizeof(**buffers), GFP_KERNEL);
+
+ if (!*buffers) {
+ ret = -ENOMEM;
+ goto fail1;
+ }
+
+ request_header = (struct vigsp_cmd_request_header*)(batch_header + 1);
+
+ mutex_lock(&vigs_dev->drm_dev->struct_mutex);
+
+ *num_buffers = 0;
+
+ while (--num_commands >= 0) {
+ request.data = (request_header + 1);
+
+ switch (request_header->cmd) {
+ case vigsp_cmd_update_vram:
+ ret = vigs_execbuffer_validate_buffer(vigs_dev,
+ &(*buffers)[*num_buffers],
+ list,
+ request.update_vram->sfc_id,
+ request_header->cmd,
+ 0,
+ request.data);
+
+ if (ret != 0) {
+ goto fail2;
+ }
+
+ ++*num_buffers;
+
+ break;
+ case vigsp_cmd_update_gpu:
+ ret = vigs_execbuffer_validate_buffer(vigs_dev,
+ &(*buffers)[*num_buffers],
+ list,
+ request.update_gpu->sfc_id,
+ request_header->cmd,
+ 0,
+ request.data);
+
+ if (ret != 0) {
+ goto fail2;
+ }
+
+ ++*num_buffers;
+
+ break;
+ case vigsp_cmd_copy:
+ ret = vigs_execbuffer_validate_buffer(vigs_dev,
+ &(*buffers)[*num_buffers],
+ list,
+ request.copy->src_id,
+ request_header->cmd,
+ 0,
+ request.data);
+
+ if (ret != 0) {
+ goto fail2;
+ }
+
+ ++*num_buffers;
+
+ ret = vigs_execbuffer_validate_buffer(vigs_dev,
+ &(*buffers)[*num_buffers],
+ list,
+ request.copy->dst_id,
+ request_header->cmd,
+ 1,
+ request.data);
+
+ if (ret != 0) {
+ goto fail2;
+ }
+
+ ++*num_buffers;
+
+ break;
+ case vigsp_cmd_solid_fill:
+ ret = vigs_execbuffer_validate_buffer(vigs_dev,
+ &(*buffers)[*num_buffers],
+ list,
+ request.solid_fill->sfc_id,
+ request_header->cmd,
+ 0,
+ request.data);
+
+ if (ret != 0) {
+ goto fail2;
+ }
+
+ ++*num_buffers;
+
+ break;
+ default:
+ break;
+ }
+
+ request_header =
+ (struct vigsp_cmd_request_header*)(request.data +
+ request_header->size);
+ }
+
+ mutex_unlock(&vigs_dev->drm_dev->struct_mutex);
+
+ return 0;
+
+fail2:
+ while (--*num_buffers >= 0) {
+ vigs_execbuffer_clear_validation(&(*buffers)[*num_buffers]);
+ }
+ mutex_unlock(&vigs_dev->drm_dev->struct_mutex);
+ kfree(*buffers);
+fail1:
+ *buffers = NULL;
+
+ return ret;
+}
+
+void vigs_execbuffer_process_buffers(struct vigs_execbuffer *execbuffer,
+ struct vigs_validate_buffer *buffers,
+ int num_buffers)
+{
+ union vigs_request request;
+ struct vigs_gem_object *gem;
+ struct vigs_surface *sfc;
+ int i;
+
+ for (i = 0; i < num_buffers; ++i) {
+ request.data = buffers[i].data;
+ gem = bo_to_vigs_gem(buffers[i].base.bo);
+ sfc = vigs_gem_to_vigs_surface(gem);
+
+ switch (buffers[i].cmd) {
+ case vigsp_cmd_update_vram:
+ if (vigs_gem_in_vram(&sfc->gem)) {
+ if (vigs_surface_need_vram_update(sfc)) {
+ request.update_vram->offset = vigs_gem_offset(&sfc->gem);
+ sfc->is_gpu_dirty = false;
+ } else {
+ DRM_DEBUG_DRIVER("Surface %u doesn't need to be updated, ignoring update_vram\n",
+ request.update_vram->sfc_id);
+ request.update_vram->sfc_id = 0;
+ }
+ } else {
+ DRM_DEBUG_DRIVER("Surface %u not in VRAM, ignoring update_vram\n",
+ request.update_vram->sfc_id);
+ request.update_vram->sfc_id = 0;
+ }
+ break;
+ case vigsp_cmd_update_gpu:
+ if (vigs_gem_in_vram(&sfc->gem)) {
+ if (vigs_surface_need_gpu_update(sfc)) {
+ request.update_gpu->offset = vigs_gem_offset(&sfc->gem);
+ sfc->is_gpu_dirty = false;
+ } else {
+ DRM_DEBUG_DRIVER("Surface %u doesn't need to be updated, ignoring update_gpu\n",
+ request.update_gpu->sfc_id);
+ request.update_gpu->sfc_id = 0;
+ }
+ } else {
+ DRM_DEBUG_DRIVER("Surface %u not in VRAM, ignoring update_gpu\n",
+ request.update_gpu->sfc_id);
+ request.update_gpu->sfc_id = 0;
+ }
+ break;
+ case vigsp_cmd_copy:
+ if (buffers[i].which && vigs_gem_in_vram(&sfc->gem)) {
+ sfc->is_gpu_dirty = true;
+ }
+ break;
+ case vigsp_cmd_solid_fill:
+ if (vigs_gem_in_vram(&sfc->gem)) {
+ sfc->is_gpu_dirty = true;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+void vigs_execbuffer_fence(struct vigs_execbuffer *execbuffer,
+ struct vigs_fence *fence)
+{
+ struct vigsp_cmd_batch_header *batch_header = execbuffer->gem.kptr;
+
+ batch_header->fence_seq = fence->seq;
+}
+
+void vigs_execbuffer_clear_validations(struct vigs_execbuffer *execbuffer,
+ struct vigs_validate_buffer *buffers,
+ int num_buffers)
+{
+ struct vigs_device *vigs_dev = execbuffer->gem.base.dev->dev_private;
+ int i;
+
+ mutex_lock(&vigs_dev->drm_dev->struct_mutex);
+
+ for (i = 0; i < num_buffers; ++i) {
+ vigs_execbuffer_clear_validation(&buffers[i]);
+ }
+
+ mutex_unlock(&vigs_dev->drm_dev->struct_mutex);
+
+ kfree(buffers);
+}
+
int vigs_execbuffer_create_ioctl(struct drm_device *drm_dev,
void *data,
struct drm_file *file_priv)
return ret;
}
+
+int vigs_execbuffer_exec_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct vigs_device *vigs_dev = drm_dev->dev_private;
+ struct drm_vigs_exec *args = data;
+ struct drm_gem_object *gem;
+ struct vigs_gem_object *vigs_gem;
+ struct vigs_execbuffer *execbuffer;
+ struct list_head list;
+ struct vigs_validate_buffer *buffers;
+ int num_buffers = 0;
+ struct vigs_fence *fence = NULL;
+ bool sync = false;
+ int ret = 0;
+
+ INIT_LIST_HEAD(&list);
+
+ gem = drm_gem_object_lookup(drm_dev, file_priv, args->handle);
+
+ if (gem == NULL) {
+ ret = -ENOENT;
+ goto out1;
+ }
+
+ vigs_gem = gem_to_vigs_gem(gem);
+
+ if (vigs_gem->type != VIGS_GEM_TYPE_EXECBUFFER) {
+ ret = -ENOENT;
+ goto out2;
+ }
+
+ execbuffer = vigs_gem_to_vigs_execbuffer(vigs_gem);
+
+ vigs_gem_reserve(vigs_gem);
+
+ /*
+ * Never unmap, as an optimization. But we have to be careful: the worst
+ * case scenario is the whole RAM BAR being mapped into the kernel.
+ */
+ ret = vigs_gem_kmap(vigs_gem);
+
+ if (ret != 0) {
+ vigs_gem_unreserve(vigs_gem);
+ goto out2;
+ }
+
+ vigs_gem_unreserve(vigs_gem);
+
+ ret = vigs_execbuffer_validate_buffers(execbuffer,
+ &list,
+ &buffers,
+ &num_buffers,
+ &sync);
+
+ if (ret != 0) {
+ goto out2;
+ }
+
+ if (list_empty(&list)) {
+ vigs_comm_exec(vigs_dev->comm, execbuffer);
+ } else {
+ ret = ttm_eu_reserve_buffers(&list);
+
+ if (ret != 0) {
+ ttm_eu_backoff_reservation(&list);
+ goto out3;
+ }
+
+ ret = vigs_fence_create(vigs_dev->fenceman, &fence);
+
+ if (ret != 0) {
+ ttm_eu_backoff_reservation(&list);
+ goto out3;
+ }
+
+ vigs_execbuffer_process_buffers(execbuffer, buffers, num_buffers);
+
+ vigs_execbuffer_fence(execbuffer, fence);
+
+ vigs_comm_exec(vigs_dev->comm, execbuffer);
+
+ ttm_eu_fence_buffer_objects(&list, fence);
+
+ if (sync) {
+ vigs_fence_wait(fence, false);
+ }
+
+ vigs_fence_unref(fence);
+ }
+
+out3:
+ vigs_execbuffer_clear_validations(execbuffer, buffers, num_buffers);
+out2:
+ drm_gem_object_unreference_unlocked(gem);
+out1:
+ return ret;
+}
#include "drmP.h"
#include "vigs_gem.h"
+#include "vigs_protocol.h"
+#include <ttm/ttm_execbuf_util.h>
+
+struct vigs_fence;
+
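+/*
+ * One entry per surface referenced by an execbuffer: 'cmd' is the command
+ * that references it, 'which' tells source (0) apart from destination (1)
+ * for vigsp_cmd_copy, and 'data' points at the command's request payload.
+ */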
+struct vigs_validate_buffer
+{
+ struct ttm_validate_buffer base;
+
+ vigsp_cmd cmd;
+
+ int which;
+
+ void *data;
+};
struct vigs_execbuffer
{
bool kernel,
struct vigs_execbuffer **execbuffer);
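+/*
+ * Walks the execbuffer's command batch, references every surface it
+ * touches and builds a TTM validation list in 'list'/'buffers'.
+ * '*sync' is set when the batch contains update_vram/update_gpu commands,
+ * i.e. when the caller should wait for the fence before returning.
+ */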
+int vigs_execbuffer_validate_buffers(struct vigs_execbuffer *execbuffer,
+ struct list_head* list,
+ struct vigs_validate_buffer **buffers,
+ int *num_buffers,
+ bool *sync);
+
+void vigs_execbuffer_process_buffers(struct vigs_execbuffer *execbuffer,
+ struct vigs_validate_buffer *buffers,
+ int num_buffers);
+
+void vigs_execbuffer_fence(struct vigs_execbuffer *execbuffer,
+ struct vigs_fence *fence);
+
+void vigs_execbuffer_clear_validations(struct vigs_execbuffer *execbuffer,
+ struct vigs_validate_buffer *buffers,
+ int num_buffers);
+
/*
* IOCTLs
* @{
void *data,
struct drm_file *file_priv);
+int vigs_execbuffer_exec_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
+
/*
* @}
*/
--- /dev/null
+#include "vigs_fence.h"
+#include "vigs_fenceman.h"
+#include "vigs_file.h"
+#include "vigs_device.h"
+#include "vigs_comm.h"
+#include <drm/vigs_drm.h>
+
+static void vigs_fence_cleanup(struct vigs_fence *fence)
+{
+}
+
+static void vigs_fence_destroy(struct vigs_fence *fence)
+{
+ vigs_fence_cleanup(fence);
+ kfree(fence);
+}
+
+static void vigs_user_fence_destroy(struct vigs_fence *fence)
+{
+ struct vigs_user_fence *user_fence = vigs_fence_to_vigs_user_fence(fence);
+
+ vigs_fence_cleanup(&user_fence->fence);
+ kfree(user_fence);
+}
+
+static void vigs_fence_release_locked(struct kref *kref)
+{
+ struct vigs_fence *fence = kref_to_vigs_fence(kref);
+
+ DRM_DEBUG_DRIVER("Fence destroyed (seq = %u, signaled = %u)\n",
+ fence->seq,
+ fence->signaled);
+
+ list_del_init(&fence->list);
+ fence->destroy(fence);
+}
+
+static void vigs_user_fence_refcount_release(struct ttm_base_object **base)
+{
+ struct ttm_base_object *tmp = *base;
+ struct vigs_user_fence *user_fence = base_to_vigs_user_fence(tmp);
+
+ vigs_fence_unref(&user_fence->fence);
+ *base = NULL;
+}
+
+static void vigs_fence_init(struct vigs_fence *fence,
+ struct vigs_fenceman *fenceman,
+ void (*destroy)(struct vigs_fence*))
+{
+ unsigned long flags;
+
+ kref_init(&fence->kref);
+ INIT_LIST_HEAD(&fence->list);
+ fence->fenceman = fenceman;
+ fence->signaled = false;
+ init_waitqueue_head(&fence->wait);
+ fence->destroy = destroy;
+
+ spin_lock_irqsave(&fenceman->lock, flags);
+
+ fence->seq = vigs_fence_seq_next(fenceman->seq);
+ fenceman->seq = fence->seq;
+
+ list_add_tail(&fence->list, &fenceman->fence_list);
+
+ spin_unlock_irqrestore(&fenceman->lock, flags);
+
+ DRM_DEBUG_DRIVER("Fence created (seq = %u)\n", fence->seq);
+}
+
+int vigs_fence_create(struct vigs_fenceman *fenceman,
+ struct vigs_fence **fence)
+{
+ int ret = 0;
+
+ *fence = kzalloc(sizeof(**fence), GFP_KERNEL);
+
+ if (!*fence) {
+ ret = -ENOMEM;
+ goto fail1;
+ }
+
+ vigs_fence_init(*fence, fenceman, &vigs_fence_destroy);
+
+ return 0;
+
+fail1:
+ *fence = NULL;
+
+ return ret;
+}
+
+int vigs_user_fence_create(struct vigs_fenceman *fenceman,
+ struct drm_file *file_priv,
+ struct vigs_user_fence **user_fence,
+ uint32_t *handle)
+{
+ struct vigs_file *vigs_file = file_priv->driver_priv;
+ int ret = 0;
+
+ *user_fence = kzalloc(sizeof(**user_fence), GFP_KERNEL);
+
+ if (!*user_fence) {
+ ret = -ENOMEM;
+ goto fail1;
+ }
+
+ vigs_fence_init(&(*user_fence)->fence, fenceman, &vigs_user_fence_destroy);
+
+ ret = ttm_base_object_init(vigs_file->obj_file,
+ &(*user_fence)->base, false,
+ VIGS_FENCE_TYPE,
+ &vigs_user_fence_refcount_release,
+ NULL);
+
+ if (ret != 0) {
+ goto fail2;
+ }
+
+ /*
+ * For ttm_base_object.
+ */
+ vigs_fence_ref(&(*user_fence)->fence);
+
+ *handle = (*user_fence)->base.hash.key;
+
+ return 0;
+
+fail2:
+ vigs_fence_cleanup(&(*user_fence)->fence);
+ kfree(*user_fence);
+fail1:
+ *user_fence = NULL;
+
+ return ret;
+}
+
+int vigs_fence_wait(struct vigs_fence *fence, bool interruptible)
+{
+ long ret = 0;
+
+ if (vigs_fence_signaled(fence)) {
+ DRM_DEBUG_DRIVER("Fence wait (seq = %u, signaled = %u)\n",
+ fence->seq,
+ fence->signaled);
+ return 0;
+ }
+
+ DRM_DEBUG_DRIVER("Fence wait (seq = %u)\n", fence->seq);
+
+ if (interruptible) {
+ ret = wait_event_interruptible(fence->wait, vigs_fence_signaled(fence));
+ } else {
+ wait_event(fence->wait, vigs_fence_signaled(fence));
+ }
+
+ if (ret != 0) {
+ DRM_INFO("Fence wait interrupted (seq = %u) = %ld\n", fence->seq, ret);
+ } else {
+ DRM_DEBUG_DRIVER("Fence wait done (seq = %u)\n", fence->seq);
+ }
+
+ return ret;
+}
+
+bool vigs_fence_signaled(struct vigs_fence *fence)
+{
+ unsigned long flags;
+ bool signaled;
+
+ spin_lock_irqsave(&fence->fenceman->lock, flags);
+
+ signaled = fence->signaled;
+
+ spin_unlock_irqrestore(&fence->fenceman->lock, flags);
+
+ return signaled;
+}
+
+void vigs_fence_ref(struct vigs_fence *fence)
+{
+ if (unlikely(!fence)) {
+ return;
+ }
+
+ kref_get(&fence->kref);
+}
+
+void vigs_fence_unref(struct vigs_fence *fence)
+{
+ struct vigs_fenceman *fenceman;
+
+ if (unlikely(!fence)) {
+ return;
+ }
+
+ fenceman = fence->fenceman;
+
+ spin_lock_irq(&fenceman->lock);
+ BUG_ON(atomic_read(&fence->kref.refcount) == 0);
+ kref_put(&fence->kref, vigs_fence_release_locked);
+ spin_unlock_irq(&fenceman->lock);
+}
+
+int vigs_fence_create_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct vigs_device *vigs_dev = drm_dev->dev_private;
+ struct vigs_file *vigs_file = file_priv->driver_priv;
+ struct drm_vigs_create_fence *args = data;
+ struct vigs_user_fence *user_fence;
+ uint32_t handle;
+ int ret;
+
+ ret = vigs_user_fence_create(vigs_dev->fenceman,
+ file_priv,
+ &user_fence,
+ &handle);
+
+ if (ret != 0) {
+ /* user_fence is NULL here, so there is nothing to unref. */
+ return ret;
+ }
+
+ if (args->send) {
+ ret = vigs_comm_fence(vigs_dev->comm, &user_fence->fence);
+
+ if (ret != 0) {
+ ttm_ref_object_base_unref(vigs_file->obj_file,
+ handle,
+ TTM_REF_USAGE);
+ goto out;
+ }
+ }
+
+ args->handle = handle;
+ args->seq = user_fence->fence.seq;
+
+out:
+ vigs_fence_unref(&user_fence->fence);
+
+ return ret;
+}
+
+int vigs_fence_wait_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct vigs_file *vigs_file = file_priv->driver_priv;
+ struct drm_vigs_fence_wait *args = data;
+ struct ttm_base_object *base;
+ struct vigs_user_fence *user_fence;
+ int ret;
+
+ base = ttm_base_object_lookup(vigs_file->obj_file, args->handle);
+
+ if (!base) {
+ return -ENOENT;
+ }
+
+ user_fence = base_to_vigs_user_fence(base);
+
+ ret = vigs_fence_wait(&user_fence->fence, true);
+
+ ttm_base_object_unref(&base);
+
+ return ret;
+}
+
+int vigs_fence_signaled_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct vigs_file *vigs_file = file_priv->driver_priv;
+ struct drm_vigs_fence_signaled *args = data;
+ struct ttm_base_object *base;
+ struct vigs_user_fence *user_fence;
+
+ base = ttm_base_object_lookup(vigs_file->obj_file, args->handle);
+
+ if (!base) {
+ return -ENOENT;
+ }
+
+ user_fence = base_to_vigs_user_fence(base);
+
+ args->signaled = vigs_fence_signaled(&user_fence->fence);
+
+ ttm_base_object_unref(&base);
+
+ return 0;
+}
+
+int vigs_fence_unref_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct vigs_file *vigs_file = file_priv->driver_priv;
+ struct drm_vigs_fence_unref *args = data;
+
+ return ttm_ref_object_base_unref(vigs_file->obj_file,
+ args->handle,
+ TTM_REF_USAGE);
+}
--- /dev/null
+#ifndef _VIGS_FENCE_H_
+#define _VIGS_FENCE_H_
+
+#include "drmP.h"
+#include <ttm/ttm_object.h>
+
+#define VIGS_FENCE_TYPE ttm_driver_type2
+
+struct vigs_fenceman;
+
+struct vigs_fence
+{
+ struct kref kref;
+
+ struct list_head list;
+
+ struct vigs_fenceman *fenceman;
+
+ uint32_t seq;
+
+ bool signaled;
+
+ wait_queue_head_t wait;
+
+ void (*destroy)(struct vigs_fence *fence);
+};
+
+/*
+ * Users can access fences via the TTM base object mechanism. We wrap
+ * vigs_fence into vigs_user_fence because not every fence object needs
+ * to be referenced from user space, so there is no point in always
+ * having a struct ttm_base_object inside vigs_fence.
+ */
+
+struct vigs_user_fence
+{
+ struct ttm_base_object base;
+
+ struct vigs_fence fence;
+};
+
+static inline struct vigs_fence *kref_to_vigs_fence(struct kref *kref)
+{
+ return container_of(kref, struct vigs_fence, kref);
+}
+
+static inline struct vigs_user_fence *vigs_fence_to_vigs_user_fence(struct vigs_fence *fence)
+{
+ return container_of(fence, struct vigs_user_fence, fence);
+}
+
+static inline struct vigs_user_fence *base_to_vigs_user_fence(struct ttm_base_object *base)
+{
+ return container_of(base, struct vigs_user_fence, base);
+}
+
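+/*
+ * 0 means "no fence" in the protocol, so it's skipped on wraparound.
+ */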
+static inline uint32_t vigs_fence_seq_next(uint32_t seq)
+{
+ if (++seq == 0) {
+ ++seq;
+ }
+ return seq;
+}
+
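+/*
+ * Wraparound-safe sequence number comparisons: the difference is evaluated
+ * as s32, so the result is correct as long as the two sequence numbers are
+ * less than 2^31 apart.
+ */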
+#define vigs_fence_seq_num_after(a, b) \
+ (typecheck(u32, a) && typecheck(u32, b) && ((s32)(b) - (s32)(a) < 0))
+
+#define vigs_fence_seq_num_before(a, b) vigs_fence_seq_num_after(b, a)
+
+#define vigs_fence_seq_num_after_eq(a, b) \
+ ( typecheck(u32, a) && typecheck(u32, b) && \
+ ((s32)(a) - (s32)(b) >= 0) )
+
+#define vigs_fence_seq_num_before_eq(a, b) vigs_fence_seq_num_after_eq(b, a)
+
+int vigs_fence_create(struct vigs_fenceman *fenceman,
+ struct vigs_fence **fence);
+
+int vigs_user_fence_create(struct vigs_fenceman *fenceman,
+ struct drm_file *file_priv,
+ struct vigs_user_fence **user_fence,
+ uint32_t *handle);
+
+int vigs_fence_wait(struct vigs_fence *fence, bool interruptible);
+
+bool vigs_fence_signaled(struct vigs_fence *fence);
+
+/*
+ * Passing NULL won't hurt, this is for convenience.
+ */
+void vigs_fence_ref(struct vigs_fence *fence);
+
+/*
+ * Passing NULL won't hurt, this is for convenience.
+ */
+void vigs_fence_unref(struct vigs_fence *fence);
+
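+/*
+ * Typical in-kernel usage (compare vigs_comm_set_root_surface() and
+ * vigs_execbuffer_exec_ioctl()):
+ *
+ *   vigs_fence_create(fenceman, &fence);
+ *   vigs_execbuffer_fence(execbuffer, fence);
+ *   vigs_comm_exec(comm, execbuffer);
+ *   vigs_fence_wait(fence, false);
+ *   vigs_fence_unref(fence);
+ */
+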
+/*
+ * IOCTLs
+ * @{
+ */
+
+int vigs_fence_create_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
+
+int vigs_fence_wait_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
+
+int vigs_fence_signaled_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
+
+int vigs_fence_unref_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
+
+/*
+ * @}
+ */
+
+#endif
--- /dev/null
+#include "vigs_fenceman.h"
+#include "vigs_fence.h"
+
+int vigs_fenceman_create(struct vigs_fenceman **fenceman)
+{
+ int ret = 0;
+
+ DRM_DEBUG_DRIVER("enter\n");
+
+ *fenceman = kzalloc(sizeof(**fenceman), GFP_KERNEL);
+
+ if (!*fenceman) {
+ ret = -ENOMEM;
+ goto fail1;
+ }
+
+ spin_lock_init(&(*fenceman)->lock);
+ INIT_LIST_HEAD(&(*fenceman)->fence_list);
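+ /*
+ * Start at UINT_MAX so that the first fence gets
+ * vigs_fence_seq_next(UINT_MAX) == 1, 0 being reserved for "no fence".
+ */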
+ (*fenceman)->seq = UINT_MAX;
+
+ return 0;
+
+fail1:
+ *fenceman = NULL;
+
+ return ret;
+}
+
+void vigs_fenceman_destroy(struct vigs_fenceman *fenceman)
+{
+ unsigned long flags;
+ bool fence_list_empty;
+
+ DRM_DEBUG_DRIVER("enter\n");
+
+ spin_lock_irqsave(&fenceman->lock, flags);
+ fence_list_empty = list_empty(&fenceman->fence_list);
+ spin_unlock_irqrestore(&fenceman->lock, flags);
+
+ BUG_ON(!fence_list_empty);
+
+ kfree(fenceman);
+}
+
+void vigs_fenceman_ack(struct vigs_fenceman *fenceman,
+ uint32_t lower, uint32_t upper)
+{
+ unsigned long flags;
+ struct vigs_fence *fence, *tmp;
+
+ spin_lock_irqsave(&fenceman->lock, flags);
+
+ list_for_each_entry_safe(fence, tmp, &fenceman->fence_list, list) {
+ if (vigs_fence_seq_num_after_eq(fence->seq, lower) &&
+ vigs_fence_seq_num_before_eq(fence->seq, upper)) {
+ DRM_DEBUG_DRIVER("Fence signaled (seq = %u)\n",
+ fence->seq);
+ list_del_init(&fence->list);
+ fence->signaled = true;
+ wake_up_all(&fence->wait);
+ }
+ }
+
+ spin_unlock_irqrestore(&fenceman->lock, flags);
+}
--- /dev/null
+#ifndef _VIGS_FENCEMAN_H_
+#define _VIGS_FENCEMAN_H_
+
+#include "drmP.h"
+
+/*
+ * This is the fence manager for VIGS. It's responsible for the following:
+ * + Fence bookkeeping.
+ * + Fence sequence number management and IRQ processing.
+ */
+
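+/*
+ * Typical flow: a fence is created and its sequence number is written
+ * into an execbuffer batch header; once the host has processed the batch
+ * it raises a fence ack interrupt, and vigs_fenceman_ack() signals every
+ * pending fence whose sequence number falls into the acked range.
+ */
+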
+struct vigs_fenceman
+{
+ /*
+ * Lock that guards all data inside the fence manager
+ * and fence objects. Don't confuse it
+ * with struct ttm_bo_device::fence_lock; that lock
+ * is used to work with TTM sync objects, i.e. it's more
+ * "high level".
+ */
+ spinlock_t lock;
+
+ /*
+ * List of currently pending fences.
+ */
+ struct list_head fence_list;
+
+ /*
+ * Current sequence number; a new fence should be
+ * assigned (seq + 1).
+ * Note: sequence numbers are always non-0, 0 being
+ * a special value that tells the GPU not to fence things.
+ */
+ uint32_t seq;
+};
+
+int vigs_fenceman_create(struct vigs_fenceman **fenceman);
+
+void vigs_fenceman_destroy(struct vigs_fenceman *fenceman);
+
+/*
+ * Can be called from IRQ handler.
+ */
+void vigs_fenceman_ack(struct vigs_fenceman *fenceman,
+ uint32_t lower, uint32_t upper);
+
+#endif
--- /dev/null
+#include "vigs_file.h"
+#include "vigs_device.h"
+
+int vigs_file_create(struct vigs_device *vigs_dev,
+ struct vigs_file **vigs_file)
+{
+ int ret = 0;
+
+ *vigs_file = kzalloc(sizeof(**vigs_file), GFP_KERNEL);
+
+ if (!*vigs_file) {
+ ret = -ENOMEM;
+ goto fail1;
+ }
+
+ (*vigs_file)->obj_file = ttm_object_file_init(vigs_dev->obj_dev, 10);
+
+ if (!(*vigs_file)->obj_file) {
+ ret = -ENOMEM;
+ goto fail2;
+ }
+
+ return 0;
+
+fail2:
+ kfree(*vigs_file);
+fail1:
+ *vigs_file = NULL;
+
+ return ret;
+}
+
+void vigs_file_destroy(struct vigs_file *vigs_file)
+{
+ ttm_object_file_release(&vigs_file->obj_file);
+ kfree(vigs_file);
+}
--- /dev/null
+#ifndef _VIGS_FILE_H_
+#define _VIGS_FILE_H_
+
+#include "drmP.h"
+#include <ttm/ttm_object.h>
+
+struct vigs_device;
+
+struct vigs_file
+{
+ struct ttm_object_file *obj_file;
+};
+
+int vigs_file_create(struct vigs_device *vigs_dev,
+ struct vigs_file **vigs_file);
+
+void vigs_file_destroy(struct vigs_file *vigs_file);
+
+#endif
return -EINVAL;
}
- INIT_LIST_HEAD(&vigs_gem->list);
-
memset(&placement, 0, sizeof(placement));
placement.placement = placements;
return vigs_gem->bo.mem.mem_type == TTM_PL_VRAM;
}
+int vigs_gem_wait(struct vigs_gem_object *vigs_gem)
+{
+ int ret;
+
+ spin_lock(&vigs_gem->bo.bdev->fence_lock);
+
+ ret = ttm_bo_wait(&vigs_gem->bo, true, false, false);
+
+ spin_unlock(&vigs_gem->bo.bdev->fence_lock);
+
+ return ret;
+}
+
void vigs_gem_free_object(struct drm_gem_object *gem)
{
struct vigs_gem_object *vigs_gem = gem_to_vigs_gem(gem);
return 0;
}
+int vigs_gem_wait_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vigs_gem_wait *args = data;
+ struct drm_gem_object *gem;
+ struct vigs_gem_object *vigs_gem;
+ int ret;
+
+ gem = drm_gem_object_lookup(drm_dev, file_priv, args->handle);
+
+ if (gem == NULL) {
+ return -ENOENT;
+ }
+
+ vigs_gem = gem_to_vigs_gem(gem);
+
+ vigs_gem_reserve(vigs_gem);
+
+ ret = vigs_gem_wait(vigs_gem);
+
+ vigs_gem_unreserve(vigs_gem);
+
+ drm_gem_object_unreference_unlocked(gem);
+
+ return ret;
+}
+
int vigs_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *drm_dev,
struct drm_mode_create_dumb *args)
*/
bool freed;
- /*
- * Use it only when this GEM is reserved. This makes it easier
- * to reserve a set of GEMs and then unreserve them later.
- */
- struct list_head list;
-
enum ttm_object_type type;
/*
*/
int vigs_gem_in_vram(struct vigs_gem_object *vigs_gem);
+int vigs_gem_wait(struct vigs_gem_object *vigs_gem);
+
/*
* @}
*/
void *data,
struct drm_file *file_priv);
+int vigs_gem_wait_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
+
/*
* @}
*/
#include "vigs_irq.h"
#include "vigs_device.h"
#include "vigs_regs.h"
+#include "vigs_fenceman.h"
static void vigs_finish_pageflips(struct vigs_device *vigs_dev)
{
{
struct drm_device *drm_dev = (struct drm_device*)arg;
struct vigs_device *vigs_dev = drm_dev->dev_private;
- u32 value;
+ u32 int_value;
+ irqreturn_t ret = IRQ_NONE;
- value = readl(vigs_dev->io_map->handle + VIGS_REG_INT);
+ int_value = readl(vigs_dev->io_map->handle + VIGS_REG_INT);
+
+ if ((int_value & (VIGS_REG_INT_VBLANK_PENDING | VIGS_REG_INT_FENCE_ACK_PENDING)) != 0) {
+ /*
+ * Clear the interrupt first in order
+ * not to stall the hardware.
+ */
+
+ writel(int_value, vigs_dev->io_map->handle + VIGS_REG_INT);
- if ((value & VIGS_REG_INT_VBLANK_PENDING) == 0) {
- return IRQ_NONE;
+ ret = IRQ_HANDLED;
}
- /*
- * Clear the interrupt first in order
- * not to stall the hardware.
- */
+ if ((int_value & VIGS_REG_INT_FENCE_ACK_PENDING) != 0) {
+ u32 lower, upper;
- value &= ~VIGS_REG_INT_VBLANK_PENDING;
+ while (1) {
+ spin_lock(&vigs_dev->irq_lock);
- writel(value, vigs_dev->io_map->handle + VIGS_REG_INT);
+ lower = readl(vigs_dev->io_map->handle + VIGS_REG_FENCE_LOWER);
+ upper = readl(vigs_dev->io_map->handle + VIGS_REG_FENCE_UPPER);
- /*
- * Handle VBLANK.
- */
+ spin_unlock(&vigs_dev->irq_lock);
- drm_handle_vblank(drm_dev, 0);
+ if (lower) {
+ vigs_fenceman_ack(vigs_dev->fenceman, lower, upper);
+ } else {
+ break;
+ }
+ }
+ }
- vigs_finish_pageflips(vigs_dev);
+ if ((int_value & VIGS_REG_INT_VBLANK_PENDING) != 0) {
+ drm_handle_vblank(drm_dev, 0);
+
+ vigs_finish_pageflips(vigs_dev);
+ }
- return IRQ_HANDLED;
+ return ret;
}
#include "vigs_mman.h"
+#include "vigs_fence.h"
#include <ttm/ttm_placement.h>
/*
.destroy = &vigs_ttm_backend_destroy,
};
-struct ttm_tt *vigs_ttm_tt_create(struct ttm_bo_device *bo_dev,
- unsigned long size,
- uint32_t page_flags,
- struct page *dummy_read_page)
+static struct ttm_tt *vigs_ttm_tt_create(struct ttm_bo_device *bo_dev,
+ unsigned long size,
+ uint32_t page_flags,
+ struct page *dummy_read_page)
{
struct ttm_dma_tt *dma_tt;
return 0;
}
-int vigs_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
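+/*
+ * vigs_fence doubles as the TTM sync object: these thin wrappers let
+ * ttm_bo_wait() and ttm_eu_fence_buffer_objects() operate on our fences.
+ */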
+static bool vigs_ttm_sync_obj_signaled(void *sync_obj, void *sync_arg)
+{
+ return vigs_fence_signaled((struct vigs_fence*)sync_obj);
+}
+
+static int vigs_ttm_sync_obj_wait(void *sync_obj,
+ void *sync_arg,
+ bool lazy,
+ bool interruptible)
+{
+ return vigs_fence_wait((struct vigs_fence*)sync_obj, interruptible);
+}
+
+static int vigs_ttm_sync_obj_flush(void *sync_obj,
+ void *sync_arg)
+{
+ return 0;
+}
+
+static void vigs_ttm_sync_obj_unref(void **sync_obj)
+{
+ struct vigs_fence* fence = *sync_obj;
+ vigs_fence_unref(fence);
+ *sync_obj = NULL;
+}
+
+static void *vigs_ttm_sync_obj_ref(void *sync_obj)
+{
+ vigs_fence_ref((struct vigs_fence*)sync_obj);
+ return sync_obj;
+}
+
+static int vigs_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
u32 placements[1];
struct ttm_placement placement;
.evict_flags = &vigs_ttm_evict_flags,
.move = &vigs_ttm_move,
.verify_access = &vigs_ttm_verify_access,
+ .sync_obj_signaled = vigs_ttm_sync_obj_signaled,
+ .sync_obj_wait = vigs_ttm_sync_obj_wait,
+ .sync_obj_flush = vigs_ttm_sync_obj_flush,
+ .sync_obj_unref = vigs_ttm_sync_obj_unref,
+ .sync_obj_ref = vigs_ttm_sync_obj_ref,
.fault_reserve_notify = &vigs_ttm_fault_reserve_notify,
.io_mem_reserve = &vigs_ttm_io_mem_reserve,
.io_mem_free = &vigs_ttm_io_mem_free,
#define _VIGS_PROTOCOL_H_
/*
- * VIGS protocol is a multiple request-single response protocol.
- *
- * + Requests come batched.
- * + The response is written after the request batch.
- *
- * Not all commands can be batched, only commands that don't have response
- * data can be batched.
+ * The VIGS protocol is a multiple-request, no-response protocol.
*/
/*
* Bump this whenever protocol changes.
*/
-#define VIGS_PROTOCOL_VERSION 14
+#define VIGS_PROTOCOL_VERSION 15
typedef signed char vigsp_s8;
typedef signed short vigsp_s16;
typedef vigsp_u32 vigsp_surface_id;
typedef vigsp_u32 vigsp_offset;
typedef vigsp_u32 vigsp_color;
+typedef vigsp_u32 vigsp_fence_seq;
typedef enum
{
+ /*
+ * These commands are guaranteed to sync on the host, i.e.
+ * no fence is required.
+ * @{
+ */
vigsp_cmd_init = 0x0,
vigsp_cmd_reset = 0x1,
vigsp_cmd_exit = 0x2,
- vigsp_cmd_create_surface = 0x3,
- vigsp_cmd_destroy_surface = 0x4,
- vigsp_cmd_set_root_surface = 0x5,
+ vigsp_cmd_set_root_surface = 0x3,
+ /*
+ * @}
+ */
+ /*
+ * These commands are executed asynchronously.
+ * @{
+ */
+ vigsp_cmd_create_surface = 0x4,
+ vigsp_cmd_destroy_surface = 0x5,
vigsp_cmd_update_vram = 0x6,
vigsp_cmd_update_gpu = 0x7,
vigsp_cmd_copy = 0x8,
vigsp_cmd_solid_fill = 0x9,
-} vigsp_cmd;
-
-typedef enum
-{
/*
- * Start from 0x1 to detect host failures on target.
+ * @}
*/
- vigsp_status_success = 0x1,
- vigsp_status_bad_call = 0x2,
- vigsp_status_exec_error = 0x3,
-} vigsp_status;
+} vigsp_cmd;
typedef enum
{
struct vigsp_cmd_batch_header
{
- vigsp_u32 num_requests;
+ /*
+ * Fence sequence requested by this batch.
+ * 0 for none.
+ */
+ vigsp_fence_seq fence_seq;
+
+ /*
+ * Batch size in bytes, not including this header.
+ * Can be 0.
+ */
+ vigsp_u32 size;
};
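+/*
+ * A batch is laid out in the execbuffer as:
+ *
+ *   struct vigsp_cmd_batch_header
+ *   N x (struct vigsp_cmd_request_header + request data)
+ *
+ * 'size' covers everything after the batch header.
+ */
+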
struct vigsp_cmd_request_header
vigsp_u32 size;
};
-struct vigsp_cmd_response_header
-{
- vigsp_status status;
-};
-
/*
* cmd_init
*
struct vigsp_cmd_init_request
{
vigsp_u32 client_version;
-};
-
-struct vigsp_cmd_init_response
-{
vigsp_u32 server_version;
};
#define VIGS_REG_EXEC 0
#define VIGS_REG_INT 8
+#define VIGS_REG_FENCE_LOWER 16
+#define VIGS_REG_FENCE_UPPER 24
#define VIGS_REG_INT_VBLANK_ENABLE 1
#define VIGS_REG_INT_VBLANK_PENDING 2
+#define VIGS_REG_INT_FENCE_ACK_PENDING 4
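+
+/*
+ * While VIGS_REG_INT_FENCE_ACK_PENDING is set, [VIGS_REG_FENCE_LOWER,
+ * VIGS_REG_FENCE_UPPER] ranges of acked fence sequence numbers can be
+ * read back; a LOWER value of 0 means there are no more ranges to ack.
+ */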
#endif
/*
* Version number.
*/
-#define YAGL_VERSION 21
+#define YAGL_VERSION 22
/*
* Device control codes magic.
/*
* Bump this whenever driver interface changes.
*/
-#define DRM_VIGS_DRIVER_VERSION 9
+#define DRM_VIGS_DRIVER_VERSION 10
/*
* Surface access flags.
unsigned long address;
};
+struct drm_vigs_gem_wait
+{
+ uint32_t handle;
+};
+
struct drm_vigs_surface_info
{
uint32_t handle;
int sync;
};
+struct drm_vigs_create_fence
+{
+ int send;
+ uint32_t handle;
+ uint32_t seq;
+};
+
+struct drm_vigs_fence_wait
+{
+ uint32_t handle;
+};
+
+struct drm_vigs_fence_signaled
+{
+ uint32_t handle;
+ int signaled;
+};
+
+struct drm_vigs_fence_unref
+{
+ uint32_t handle;
+};
+
#define DRM_VIGS_GET_PROTOCOL_VERSION 0x00
#define DRM_VIGS_CREATE_SURFACE 0x01
#define DRM_VIGS_CREATE_EXECBUFFER 0x02
#define DRM_VIGS_GEM_MAP 0x03
-#define DRM_VIGS_SURFACE_INFO 0x04
-#define DRM_VIGS_EXEC 0x05
-#define DRM_VIGS_SURFACE_SET_GPU_DIRTY 0x06
-#define DRM_VIGS_SURFACE_START_ACCESS 0x07
-#define DRM_VIGS_SURFACE_END_ACCESS 0x08
+#define DRM_VIGS_GEM_WAIT 0x04
+#define DRM_VIGS_SURFACE_INFO 0x05
+#define DRM_VIGS_EXEC 0x06
+#define DRM_VIGS_SURFACE_SET_GPU_DIRTY 0x07
+#define DRM_VIGS_SURFACE_START_ACCESS 0x08
+#define DRM_VIGS_SURFACE_END_ACCESS 0x09
+#define DRM_VIGS_CREATE_FENCE 0x0A
+#define DRM_VIGS_FENCE_WAIT 0x0B
+#define DRM_VIGS_FENCE_SIGNALED 0x0C
+#define DRM_VIGS_FENCE_UNREF 0x0D
#define DRM_IOCTL_VIGS_GET_PROTOCOL_VERSION DRM_IOR(DRM_COMMAND_BASE + \
DRM_VIGS_GET_PROTOCOL_VERSION, struct drm_vigs_get_protocol_version)
DRM_VIGS_CREATE_EXECBUFFER, struct drm_vigs_create_execbuffer)
#define DRM_IOCTL_VIGS_GEM_MAP DRM_IOWR(DRM_COMMAND_BASE + \
DRM_VIGS_GEM_MAP, struct drm_vigs_gem_map)
+#define DRM_IOCTL_VIGS_GEM_WAIT DRM_IOW(DRM_COMMAND_BASE + \
+ DRM_VIGS_GEM_WAIT, struct drm_vigs_gem_wait)
#define DRM_IOCTL_VIGS_SURFACE_INFO DRM_IOWR(DRM_COMMAND_BASE + \
DRM_VIGS_SURFACE_INFO, struct drm_vigs_surface_info)
#define DRM_IOCTL_VIGS_EXEC DRM_IOW(DRM_COMMAND_BASE + \
DRM_VIGS_SURFACE_START_ACCESS, struct drm_vigs_surface_start_access)
#define DRM_IOCTL_VIGS_SURFACE_END_ACCESS DRM_IOW(DRM_COMMAND_BASE + \
DRM_VIGS_SURFACE_END_ACCESS, struct drm_vigs_surface_end_access)
+#define DRM_IOCTL_VIGS_CREATE_FENCE DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_VIGS_CREATE_FENCE, struct drm_vigs_create_fence)
+#define DRM_IOCTL_VIGS_FENCE_WAIT DRM_IOW(DRM_COMMAND_BASE + \
+ DRM_VIGS_FENCE_WAIT, struct drm_vigs_fence_wait)
+#define DRM_IOCTL_VIGS_FENCE_SIGNALED DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_VIGS_FENCE_SIGNALED, struct drm_vigs_fence_signaled)
+#define DRM_IOCTL_VIGS_FENCE_UNREF DRM_IOW(DRM_COMMAND_BASE + \
+ DRM_VIGS_FENCE_UNREF, struct drm_vigs_fence_unref)
#endif