vigs_drm-y := main.o \
vigs_driver.o \
vigs_gem.o \
+ vigs_surface.o \
+ vigs_execbuffer.o \
vigs_device.o \
vigs_mman.o \
- vigs_buffer.o \
vigs_crtc.o \
vigs_output.o \
vigs_framebuffer.o \
+++ /dev/null
-#include "vigs_buffer.h"
-#include "vigs_mman.h"
-#include <drm/vigs_drm.h>
-#include <ttm/ttm_placement.h>
-
-static void vigs_buffer_destroy(struct kref *kref)
-{
- struct vigs_buffer_object *vigs_bo = kref_to_vigs_buffer(kref);
- struct ttm_buffer_object *bo = &(vigs_bo->base);
-
- vigs_buffer_kunmap(vigs_bo);
-
- DRM_DEBUG_DRIVER("buffer destroyed (dom = %u, off = %lu, sz = %lu)\n",
- vigs_bo->domain,
- vigs_buffer_offset(vigs_bo),
- vigs_buffer_accounted_size(vigs_bo));
-
- ttm_bo_unref(&bo);
-}
-
-static void vigs_buffer_base_destroy(struct ttm_buffer_object *bo)
-{
- struct vigs_buffer_object *vigs_bo = bo_to_vigs_buffer(bo);
-
- kfree(vigs_bo);
-}
-
-int vigs_buffer_create(struct vigs_mman *mman,
- unsigned long size,
- bool kernel,
- u32 domain,
- struct vigs_buffer_object **vigs_bo)
-{
- u32 placements[1];
- struct ttm_placement placement;
- enum ttm_bo_type type;
- int ret = 0;
-
- if (size == 0) {
- return -EINVAL;
- }
-
- *vigs_bo = NULL;
-
- if (domain == DRM_VIGS_GEM_DOMAIN_VRAM) {
- placements[0] =
- TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | TTM_PL_FLAG_NO_EVICT;
- } else if (domain == DRM_VIGS_GEM_DOMAIN_RAM) {
- placements[0] =
- TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | TTM_PL_FLAG_NO_EVICT;
- } else {
- return -EINVAL;
- }
-
- memset(&placement, 0, sizeof(placement));
-
- placement.placement = placements;
- placement.busy_placement = placements;
- placement.num_placement = 1;
- placement.num_busy_placement = 1;
-
- if (kernel) {
- type = ttm_bo_type_kernel;
- } else {
- type = ttm_bo_type_device;
- }
-
- *vigs_bo = kzalloc(sizeof(**vigs_bo), GFP_KERNEL);
-
- if (!*vigs_bo) {
- return -ENOMEM;
- }
-
- ret = ttm_bo_init(&mman->bo_dev, &(*vigs_bo)->base, size, type,
- &placement, 0, 0,
- false, NULL, size,
- &vigs_buffer_base_destroy);
-
- if (ret != 0) {
- /*
- * '*vigs_bo' is freed by 'ttm_bo_init'
- */
- *vigs_bo = NULL;
- return ret;
- }
-
- (*vigs_bo)->domain = domain;
-
- kref_init(&(*vigs_bo)->kref);
-
- DRM_DEBUG_DRIVER("buffer created (dom = %u, off = %lu, sz = %lu)\n",
- (*vigs_bo)->domain,
- vigs_buffer_offset(*vigs_bo),
- vigs_buffer_accounted_size(*vigs_bo));
-
- return 0;
-}
-
-void vigs_buffer_acquire(struct vigs_buffer_object *vigs_bo)
-{
- if (vigs_bo) {
- kref_get(&vigs_bo->kref);
- }
-}
-
-void vigs_buffer_release(struct vigs_buffer_object *vigs_bo)
-{
- if (vigs_bo) {
- kref_put(&vigs_bo->kref, vigs_buffer_destroy);
- }
-}
-
-int vigs_buffer_kmap(struct vigs_buffer_object *vigs_bo)
-{
- bool is_iomem;
- int ret;
-
- if (vigs_bo->kptr) {
- return 0;
- }
-
- ret = ttm_bo_kmap(&vigs_bo->base,
- 0,
- vigs_bo->base.num_pages,
- &vigs_bo->kmap);
-
- if (ret != 0) {
- return ret;
- }
-
- vigs_bo->kptr = ttm_kmap_obj_virtual(&vigs_bo->kmap, &is_iomem);
-
- DRM_DEBUG_DRIVER("buffer (dom = %u, off = %lu, sz = %lu) mapped to %p\n",
- vigs_bo->domain,
- vigs_buffer_offset(vigs_bo),
- vigs_buffer_accounted_size(vigs_bo),
- vigs_bo->kptr);
-
- return 0;
-}
-
-void vigs_buffer_kunmap(struct vigs_buffer_object *vigs_bo)
-{
- if (vigs_bo->kptr == NULL) {
- return;
- }
-
- vigs_bo->kptr = NULL;
-
- ttm_bo_kunmap(&vigs_bo->kmap);
-
- DRM_DEBUG_DRIVER("buffer (dom = %u, off = %lu, sz = %lu) unmapped\n",
- vigs_bo->domain,
- vigs_buffer_offset(vigs_bo),
- vigs_buffer_accounted_size(vigs_bo));
-}
+++ /dev/null
-#ifndef _VIGS_BUFFER_H_
-#define _VIGS_BUFFER_H_
-
-#include "drmP.h"
-#include <ttm/ttm_bo_driver.h>
-
-struct vigs_mman;
-
-struct vigs_buffer_object
-{
- struct ttm_buffer_object base;
-
- u32 domain;
-
- /*
- * ttm_buffer_object::destroy isn't good enough for us because
- * we want to 'vigs_buffer_kunmap' before object destruction and
- * it's too late for that in ttm_buffer_object::destroy.
- */
- struct kref kref;
-
- /*
- * Valid only after successful call to 'vigs_buffer_kmap'.
- * @{
- */
-
- struct ttm_bo_kmap_obj kmap;
- void *kptr; /* Kernel pointer to buffer data. */
-
- /*
- * @}
- */
-};
-
-static inline struct vigs_buffer_object *bo_to_vigs_buffer(struct ttm_buffer_object *bo)
-{
- return container_of(bo, struct vigs_buffer_object, base);
-}
-
-static inline struct vigs_buffer_object *kref_to_vigs_buffer(struct kref *kref)
-{
- return container_of(kref, struct vigs_buffer_object, kref);
-}
-
-/*
- * when 'kernel' is true the buffer will be accessible from
- * kernel only.
- * 'domain' must be either VRAM or RAM. CPU domain is not supported.
- */
-int vigs_buffer_create(struct vigs_mman *mman,
- unsigned long size,
- bool kernel,
- u32 domain,
- struct vigs_buffer_object **vigs_bo);
-
-/*
- * Page aligned buffer size.
- */
-static inline unsigned long vigs_buffer_size(struct vigs_buffer_object *vigs_bo)
-{
- return vigs_bo->base.num_pages << PAGE_SHIFT;
-}
-
-/*
- * Actual size that was passed to 'vigs_buffer_create'.
- */
-static inline unsigned long vigs_buffer_accounted_size(struct vigs_buffer_object *vigs_bo)
-{
- return vigs_bo->base.acc_size;
-}
-
-/*
- * Buffer offset relative to 0.
- */
-static inline unsigned long vigs_buffer_offset(struct vigs_buffer_object *vigs_bo)
-{
- return vigs_bo->base.offset;
-}
-
-/*
- * Buffer offset relative to DRM_FILE_OFFSET. For kernel buffers it's always 0.
- */
-static inline u64 vigs_buffer_mmap_offset(struct vigs_buffer_object *vigs_bo)
-{
- return vigs_bo->base.addr_space_offset;
-}
-
-static inline void vigs_buffer_reserve(struct vigs_buffer_object *vigs_bo)
-{
- int ret;
-
- ret = ttm_bo_reserve(&vigs_bo->base, false, false, false, 0);
-
- BUG_ON(ret != 0);
-}
-
-static inline void vigs_buffer_unreserve(struct vigs_buffer_object *vigs_bo)
-{
- ttm_bo_unreserve(&vigs_bo->base);
-}
-
-/*
- * Functions below MUST NOT be called between
- * vigs_buffer_reserve/vigs_buffer_unreserve.
- * @{
- */
-
-/*
- * Increments ref count.
- * Passing NULL won't hurt, this is for convenience.
- */
-void vigs_buffer_acquire(struct vigs_buffer_object *vigs_bo);
-
-/*
- * Decrements ref count, releases and sets 'vigs_bo' to NULL when 0.
- * Passing NULL won't hurt, this is for convenience.
- */
-void vigs_buffer_release(struct vigs_buffer_object *vigs_bo);
-
-/*
- * @}
- */
-
-/*
- * Functions below MUST be called between
- * vigs_buffer_reserve/vigs_buffer_unreserve if simultaneous access
- * from different threads is possible.
- * @{
- */
-
-int vigs_buffer_kmap(struct vigs_buffer_object *vigs_bo);
-
-void vigs_buffer_kunmap(struct vigs_buffer_object *vigs_bo);
-
-/*
- * @}
- */
-
-#endif
#include "vigs_comm.h"
#include "vigs_device.h"
-#include "vigs_gem.h"
-#include "vigs_buffer.h"
+#include "vigs_execbuffer.h"
#include <drm/vigs_drm.h>
static int vigs_comm_prepare(struct vigs_comm *comm,
{
int ret;
void *ptr;
+ struct vigsp_cmd_batch_header *batch_header;
struct vigsp_cmd_request_header *request_header;
- unsigned long total_size = sizeof(struct vigsp_cmd_request_header) +
+ unsigned long total_size = sizeof(*batch_header) +
+ sizeof(*request_header) +
request_size +
sizeof(struct vigsp_cmd_response_header) +
response_size;
- if (!comm->cmd_gem || (vigs_buffer_size(comm->cmd_gem->bo) < total_size)) {
- if (comm->cmd_gem) {
- drm_gem_object_unreference_unlocked(&comm->cmd_gem->base);
- comm->cmd_gem = NULL;
+ if (!comm->execbuffer || (vigs_gem_size(&comm->execbuffer->gem) < total_size)) {
+ if (comm->execbuffer) {
+ drm_gem_object_unreference_unlocked(&comm->execbuffer->gem.base);
+ comm->execbuffer = NULL;
}
- ret = vigs_gem_create(comm->vigs_dev,
- total_size,
- true,
- DRM_VIGS_GEM_DOMAIN_RAM,
- &comm->cmd_gem);
+ ret = vigs_execbuffer_create(comm->vigs_dev,
+ total_size,
+ true,
+ &comm->execbuffer);
if (ret != 0) {
- DRM_ERROR("unable to create command GEM\n");
+ DRM_ERROR("unable to create execbuffer\n");
return ret;
}
- ret = vigs_buffer_kmap(comm->cmd_gem->bo);
+ vigs_gem_reserve(&comm->execbuffer->gem);
+
+ ret = vigs_gem_kmap(&comm->execbuffer->gem);
+
+ vigs_gem_unreserve(&comm->execbuffer->gem);
if (ret != 0) {
- DRM_ERROR("unable to kmap command GEM\n");
+ DRM_ERROR("unable to kmap execbuffer\n");
- drm_gem_object_unreference_unlocked(&comm->cmd_gem->base);
- comm->cmd_gem = NULL;
+ drm_gem_object_unreference_unlocked(&comm->execbuffer->gem.base);
+ comm->execbuffer = NULL;
return ret;
}
}
- ptr = comm->cmd_gem->bo->kptr;
+ ptr = comm->execbuffer->gem.kptr;
- memset(ptr, 0, vigs_buffer_size(comm->cmd_gem->bo));
+ memset(ptr, 0, vigs_gem_size(&comm->execbuffer->gem));
- request_header = ptr;
+ batch_header = ptr;
+ request_header = (struct vigsp_cmd_request_header*)(batch_header + 1);
+
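+ /*
+ * Internal commands always go one request per batch; user-space
+ * built execbuffers may pack more.
+ */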
+ batch_header->num_requests = 1;
request_header->cmd = cmd;
- request_header->response_offset = request_size;
+ request_header->size = request_size;
if (request) {
*request = (request_header + 1);
return 0;
}
-static int vigs_comm_exec(struct vigs_comm *comm)
+static int vigs_comm_exec_internal(struct vigs_comm *comm)
{
- struct vigsp_cmd_request_header *request_header = comm->cmd_gem->bo->kptr;
+ struct vigsp_cmd_batch_header *batch_header = comm->execbuffer->gem.kptr;
+ struct vigsp_cmd_request_header *request_header =
+ (struct vigsp_cmd_request_header*)(batch_header + 1);
struct vigsp_cmd_response_header *response_header =
- (void*)(request_header + 1) + request_header->response_offset;
+ (struct vigsp_cmd_response_header*)((u8*)(request_header + 1) +
+ request_header->size);
/*
- * 'writel' already has the mem barrier, so it's ok to just access the
- * response data afterwards.
+ * TODO: remove after DRI2 fixes.
*/
+ return 0;
- writel(vigs_buffer_offset(comm->cmd_gem->bo),
- VIGS_USER_PTR(comm->io_ptr, 0) + VIGS_REG_RAM_OFFSET);
+ vigs_comm_exec(comm, comm->execbuffer);
switch (response_header->status) {
case vigsp_status_success:
request->client_version = VIGS_PROTOCOL_VERSION;
- ret = vigs_comm_exec(comm);
+ ret = vigs_comm_exec_internal(comm);
if (ret != 0) {
return ret;
}
+ /*
+ * TODO: remove after DRI2 fixes.
+ */
+ response->server_version = VIGS_PROTOCOL_VERSION;
+
if (response->server_version != VIGS_PROTOCOL_VERSION) {
DRM_ERROR("protocol version mismatch, expected %u, actual %u\n",
VIGS_PROTOCOL_VERSION,
return;
}
- vigs_comm_exec(comm);
+ vigs_comm_exec_internal(comm);
}
int vigs_comm_create(struct vigs_device *vigs_dev,
goto fail2;
}
- /*
- * We're always guaranteed that 'user_map' has at least one element
- * and we should use it, just stuff in 'this' pointer in order
- * not to loose this slot.
- */
- vigs_dev->user_map[0] = (struct drm_file*)(*comm);
-
return 0;
fail2:
- if ((*comm)->cmd_gem) {
- drm_gem_object_unreference_unlocked(&(*comm)->cmd_gem->base);
+ if ((*comm)->execbuffer) {
+ drm_gem_object_unreference_unlocked(&(*comm)->execbuffer->gem.base);
}
kfree(*comm);
fail1:
DRM_DEBUG_DRIVER("enter\n");
vigs_comm_exit(comm);
- comm->vigs_dev->user_map[0] = NULL;
- if (comm->cmd_gem) {
- drm_gem_object_unreference_unlocked(&comm->cmd_gem->base);
+ if (comm->execbuffer) {
+ drm_gem_object_unreference_unlocked(&comm->execbuffer->gem.base);
}
kfree(comm);
}
+void vigs_comm_exec(struct vigs_comm *comm,
+ struct vigs_execbuffer *execbuffer)
+{
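+ /*
+ * Writing the execbuffer's offset to the IO register is what kicks
+ * host-side execution of the batch.
+ */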
+ writel(vigs_gem_offset(&execbuffer->gem), comm->io_ptr);
+}
+
int vigs_comm_reset(struct vigs_comm *comm)
{
int ret;
return ret;
}
- return vigs_comm_exec(comm);
+ return vigs_comm_exec_internal(comm);
}
int vigs_comm_create_surface(struct vigs_comm *comm,
- unsigned int width,
- unsigned int height,
- unsigned int stride,
+ u32 width,
+ u32 height,
+ u32 stride,
vigsp_surface_format format,
- struct vigs_gem_object *sfc_gem,
- vigsp_surface_id *id)
+ vigsp_surface_id id)
{
int ret;
struct vigsp_cmd_create_surface_request *request;
- struct vigsp_cmd_create_surface_response *response;
- DRM_DEBUG_DRIVER("width = %u, height = %u, stride = %u, fmt = %d\n",
+ DRM_DEBUG_DRIVER("width = %u, height = %u, stride = %u, fmt = %d, id = 0x%llX\n",
width,
height,
stride,
- format);
+ format,
+ id);
ret = vigs_comm_prepare(comm,
vigsp_cmd_create_surface,
sizeof(*request),
- sizeof(*response),
+ 0,
(void**)&request,
- (void**)&response);
+ NULL);
if (ret != 0) {
return ret;
request->height = height;
request->stride = stride;
request->format = format;
- request->vram_offset = vigs_buffer_offset(sfc_gem->bo);
-
- ret = vigs_comm_exec(comm);
-
- if (ret != 0) {
- return ret;
- }
-
- DRM_DEBUG_DRIVER("created = %u\n", response->id);
-
- if (id) {
- *id = response->id;
- }
+ request->id = id;
- return 0;
+ return vigs_comm_exec_internal(comm);
}
int vigs_comm_destroy_surface(struct vigs_comm *comm, vigsp_surface_id id)
int ret;
struct vigsp_cmd_destroy_surface_request *request;
- DRM_DEBUG_DRIVER("id = %u\n", id);
+ DRM_DEBUG_DRIVER("id = 0x%llX\n", id);
ret = vigs_comm_prepare(comm,
vigsp_cmd_destroy_surface,
request->id = id;
- return vigs_comm_exec(comm);
+ return vigs_comm_exec_internal(comm);
}
-int vigs_comm_set_root_surface(struct vigs_comm *comm, vigsp_surface_id id)
+int vigs_comm_set_root_surface(struct vigs_comm *comm,
+ vigsp_surface_id id,
+ vigsp_offset offset)
{
int ret;
struct vigsp_cmd_set_root_surface_request *request;
- DRM_DEBUG_DRIVER("id = %u\n", id);
+ DRM_DEBUG_DRIVER("id = 0x%llX\n", id);
ret = vigs_comm_prepare(comm,
vigsp_cmd_set_root_surface,
}
request->id = id;
+ request->offset = offset;
- return vigs_comm_exec(comm);
+ return vigs_comm_exec_internal(comm);
}
int vigs_comm_get_protocol_version_ioctl(struct drm_device *drm_dev,
struct drm_device;
struct drm_file;
struct vigs_device;
-struct vigs_gem_object;
+struct vigs_execbuffer;
struct vigs_comm
{
*/
void __iomem *io_ptr;
- struct vigs_gem_object *cmd_gem;
+ /*
+ * For internal use.
+ */
+ struct vigs_execbuffer *execbuffer;
};
int vigs_comm_create(struct vigs_device *vigs_dev,
void vigs_comm_destroy(struct vigs_comm *comm);
+void vigs_comm_exec(struct vigs_comm *comm,
+ struct vigs_execbuffer *execbuffer);
+
int vigs_comm_reset(struct vigs_comm *comm);
int vigs_comm_create_surface(struct vigs_comm *comm,
- unsigned int width,
- unsigned int height,
- unsigned int stride,
+ u32 width,
+ u32 height,
+ u32 stride,
vigsp_surface_format format,
- struct vigs_gem_object *sfc_gem,
- vigsp_surface_id *id);
+ vigsp_surface_id id);
int vigs_comm_destroy_surface(struct vigs_comm *comm, vigsp_surface_id id);
-int vigs_comm_set_root_surface(struct vigs_comm *comm, vigsp_surface_id id);
+int vigs_comm_set_root_surface(struct vigs_comm *comm,
+ vigsp_surface_id id,
+ vigsp_offset offset);
/*
* IOCTLs
#include "vigs_crtc.h"
#include "vigs_device.h"
#include "vigs_framebuffer.h"
+#include "vigs_surface.h"
#include "vigs_comm.h"
#include "drm_crtc_helper.h"
vigs_fb = fb_to_vigs_fb(crtc->fb);
- ret = vigs_comm_set_root_surface(vigs_dev->comm, vigs_fb->sfc_id);
+ ret = vigs_framebuffer_pin(vigs_fb);
if (ret != 0) {
return ret;
}
+ ret = vigs_comm_set_root_surface(vigs_dev->comm,
+ vigs_surface_id(vigs_fb->fb_sfc),
+ vigs_gem_offset(&vigs_fb->fb_sfc->gem));
+
+ if (ret != 0) {
+ vigs_framebuffer_unpin(vigs_fb);
+ return ret;
+ }
+
+ if (old_fb) {
+ vigs_framebuffer_unpin(fb_to_vigs_fb(old_fb));
+ }
+
return 0;
}
return;
}
- vigs_comm_set_root_surface(vigs_dev->comm, 0);
+ vigs_comm_set_root_surface(vigs_dev->comm, 0, 0);
+
+ vigs_framebuffer_unpin(fb_to_vigs_fb(crtc->fb));
}
static const struct drm_crtc_funcs vigs_crtc_funcs =
#include "vigs_framebuffer.h"
#include "vigs_comm.h"
#include "vigs_fbdev.h"
+#include "vigs_execbuffer.h"
#include <drm/vigs_drm.h>
int vigs_device_init(struct vigs_device *vigs_dev,
goto fail1;
}
- if ((vigs_dev->io_size < VIGS_REGS_SIZE) ||
- ((vigs_dev->io_size % VIGS_REGS_SIZE) != 0)) {
+ if ((vigs_dev->io_size < sizeof(void*)) ||
+ ((vigs_dev->io_size % sizeof(void*)) != 0)) {
DRM_ERROR("IO bar has bad size: %u bytes\n", vigs_dev->io_size);
ret = -ENODEV;
goto fail1;
goto fail2;
}
- vigs_dev->user_map_length = (vigs_dev->io_size / VIGS_REGS_SIZE);
-
- vigs_dev->user_map =
- kzalloc((sizeof(*vigs_dev->user_map) * vigs_dev->user_map_length),
- GFP_KERNEL);
-
- if (!vigs_dev->user_map) {
- ret = -ENOMEM;
- goto fail3;
- }
-
- mutex_init(&vigs_dev->user_mutex);
-
ret = vigs_comm_create(vigs_dev, &vigs_dev->comm);
if (ret != 0) {
- goto fail4;
+ goto fail3;
}
drm_mode_config_init(vigs_dev->drm_dev);
ret = vigs_crtc_init(vigs_dev);
if (ret != 0) {
- goto fail5;
+ goto fail4;
}
ret = vigs_output_init(vigs_dev);
if (ret != 0) {
- goto fail5;
+ goto fail4;
}
ret = vigs_fbdev_create(vigs_dev, &vigs_dev->fbdev);
if (ret != 0) {
- goto fail5;
+ goto fail4;
}
return 0;
-fail5:
+fail4:
drm_mode_config_cleanup(vigs_dev->drm_dev);
vigs_comm_destroy(vigs_dev->comm);
-fail4:
- kfree(vigs_dev->user_map);
fail3:
vigs_mman_destroy(vigs_dev->mman);
fail2:
vigs_fbdev_destroy(vigs_dev->fbdev);
drm_mode_config_cleanup(vigs_dev->drm_dev);
vigs_comm_destroy(vigs_dev->comm);
- kfree(vigs_dev->user_map);
vigs_mman_destroy(vigs_dev->mman);
drm_rmmap(vigs_dev->drm_dev, vigs_dev->io_map);
}
return vigs_mman_mmap(vigs_dev->mman, filp, vma);
}
-int vigs_device_user_enter_ioctl(struct drm_device *drm_dev,
- void *data,
- struct drm_file *file_priv)
+int vigs_device_exec_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
{
struct vigs_device *vigs_dev = drm_dev->dev_private;
- struct drm_vigs_user_enter *args = data;
- int i;
- int index = -1;
-
- mutex_lock(&vigs_dev->user_mutex);
-
- for (i = 0; i < vigs_dev->user_map_length; ++i) {
- if (!vigs_dev->user_map[i]) {
- index = i;
- vigs_dev->user_map[i] = file_priv;
- break;
- }
- }
-
- if (index == -1) {
- DRM_ERROR("no more free user slots\n");
- mutex_unlock(&vigs_dev->user_mutex);
- return -ENOSPC;
- }
-
-#if defined(__i386__) || defined(__x86_64__)
- /*
- * Write CR registers.
- * @{
- */
-
- writel(read_cr0(), VIGS_USER_PTR(vigs_dev->io_map->handle, index) + VIGS_REG_CR0);
- writel(0, VIGS_USER_PTR(vigs_dev->io_map->handle, index) + VIGS_REG_CR1);
- writel(read_cr2(), VIGS_USER_PTR(vigs_dev->io_map->handle, index) + VIGS_REG_CR2);
- writel(read_cr3(), VIGS_USER_PTR(vigs_dev->io_map->handle, index) + VIGS_REG_CR3);
- writel(read_cr4(), VIGS_USER_PTR(vigs_dev->io_map->handle, index) + VIGS_REG_CR4);
+ struct drm_vigs_exec *args = data;
+ struct drm_gem_object *gem;
+ struct vigs_gem_object *vigs_gem;
+ struct vigs_execbuffer *execbuffer;
- /*
- * @}
- */
-#endif
+ gem = drm_gem_object_lookup(drm_dev, file_priv, args->handle);
- mutex_unlock(&vigs_dev->user_mutex);
-
- args->index = index;
-
- DRM_DEBUG_DRIVER("user %u entered\n", args->index);
-
- return 0;
-}
-
-int vigs_device_user_leave_ioctl(struct drm_device *drm_dev,
- void *data,
- struct drm_file *file_priv)
-{
- struct vigs_device *vigs_dev = drm_dev->dev_private;
- struct drm_vigs_user_leave *args = data;
-
- if (args->index >= vigs_dev->user_map_length) {
- DRM_ERROR("invalid index: %u\n", args->index);
- return -EINVAL;
+ if (gem == NULL) {
+ return -ENOENT;
}
- mutex_lock(&vigs_dev->user_mutex);
+ vigs_gem = gem_to_vigs_gem(gem);
- if (vigs_dev->user_map[args->index] != file_priv) {
- DRM_ERROR("user doesn't own index %u\n", args->index);
- mutex_unlock(&vigs_dev->user_mutex);
- return -EINVAL;
+ if (vigs_gem->type != VIGS_GEM_TYPE_EXECBUFFER) {
+ drm_gem_object_unreference_unlocked(gem);
+ return -ENOENT;
}
- vigs_dev->user_map[args->index] = NULL;
+ execbuffer = vigs_gem_to_vigs_execbuffer(vigs_gem);
- mutex_unlock(&vigs_dev->user_mutex);
+ vigs_comm_exec(vigs_dev->comm, execbuffer);
- DRM_DEBUG_DRIVER("user %u left\n", args->index);
+ drm_gem_object_unreference_unlocked(gem);
return 0;
}
-
-void vigs_device_user_leave_all(struct vigs_device *vigs_dev,
- struct drm_file *file_priv)
-{
- int i;
-
- mutex_lock(&vigs_dev->user_mutex);
-
- for (i = 0; i < vigs_dev->user_map_length; ++i) {
- if (vigs_dev->user_map[i] == file_priv) {
- vigs_dev->user_map[i] = NULL;
- DRM_DEBUG_DRIVER("user %d left\n", i);
- }
- }
-
- mutex_unlock(&vigs_dev->user_mutex);
-}
struct vigs_comm;
struct vigs_fbdev;
-#define VIGS_REG_RAM_OFFSET 0
-#define VIGS_REG_CR0 8
-#define VIGS_REG_CR1 16
-#define VIGS_REG_CR2 24
-#define VIGS_REG_CR3 32
-#define VIGS_REG_CR4 40
-#define VIGS_REGS_SIZE 64
-
-#define VIGS_USER_PTR(io_ptr, index) ((io_ptr) + ((index) * VIGS_REGS_SIZE))
-
struct vigs_device
{
struct device *dev;
struct vigs_mman *mman;
- /* slot contains DRM file pointer if user is active, NULL if slot can be used. */
- struct drm_file **user_map;
-
- /* Length of 'user_map'. Must be at least 1. */
- int user_map_length;
-
- /* Mutex used to serialize access to user_map. */
- struct mutex user_mutex;
-
- /* Communicator instance for kernel itself, takes slot #0 in user_map. */
struct vigs_comm *comm;
struct vigs_fbdev *fbdev;
* @{
*/
-int vigs_device_user_enter_ioctl(struct drm_device *drm_dev,
- void *data,
- struct drm_file *file_priv);
-
-int vigs_device_user_leave_ioctl(struct drm_device *drm_dev,
- void *data,
- struct drm_file *file_priv);
-
-void vigs_device_user_leave_all(struct vigs_device *vigs_dev,
- struct drm_file *file_priv);
+int vigs_device_exec_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
/*
* @}
#include "vigs_device.h"
#include "vigs_fbdev.h"
#include "vigs_comm.h"
-#include "vigs_framebuffer.h"
+#include "vigs_surface.h"
+#include "vigs_execbuffer.h"
#include "drmP.h"
#include "drm.h"
#include <linux/module.h>
{
DRM_IOCTL_DEF_DRV(VIGS_GET_PROTOCOL_VERSION, vigs_comm_get_protocol_version_ioctl,
DRM_UNLOCKED | DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIGS_GEM_CREATE, vigs_gem_create_ioctl,
- DRM_UNLOCKED | DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIGS_GEM_MMAP, vigs_gem_mmap_ioctl,
- DRM_UNLOCKED | DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIGS_GEM_INFO, vigs_gem_info_ioctl,
- DRM_UNLOCKED | DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIGS_USER_ENTER, vigs_device_user_enter_ioctl,
- DRM_UNLOCKED | DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIGS_USER_LEAVE, vigs_device_user_leave_ioctl,
- DRM_UNLOCKED | DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIGS_FB_INFO, vigs_framebuffer_info_ioctl,
- DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIGS_CREATE_SURFACE, vigs_surface_create_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIGS_CREATE_EXECBUFFER, vigs_execbuffer_create_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIGS_SURFACE_INFO, vigs_surface_info_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIGS_EXEC, vigs_device_exec_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
};
static const struct file_operations vigs_drm_driver_fops =
static void vigs_drm_postclose(struct drm_device *dev,
struct drm_file *file_priv)
{
- struct vigs_device *vigs_dev = dev->dev_private;
-
DRM_DEBUG_DRIVER("enter\n");
-
- vigs_device_user_leave_all(vigs_dev, file_priv);
}
static void vigs_drm_lastclose(struct drm_device *dev)
--- /dev/null
+#include "vigs_execbuffer.h"
+#include <drm/vigs_drm.h>
+
+static void vigs_execbuffer_destroy(struct vigs_gem_object *gem)
+{
+ struct vigs_execbuffer *execbuffer = vigs_gem_to_vigs_execbuffer(gem);
+
+ vigs_gem_cleanup(&execbuffer->gem);
+}
+
+int vigs_execbuffer_create(struct vigs_device *vigs_dev,
+ unsigned long size,
+ bool kernel,
+ struct vigs_execbuffer **execbuffer)
+{
+ int ret = 0;
+
+ *execbuffer = kzalloc(sizeof(**execbuffer), GFP_KERNEL);
+
+ if (!*execbuffer) {
+ ret = -ENOMEM;
+ goto fail1;
+ }
+
+ ret = vigs_gem_init(&(*execbuffer)->gem,
+ vigs_dev,
+ VIGS_GEM_TYPE_EXECBUFFER,
+ size,
+ kernel,
+ &vigs_execbuffer_destroy);
+
+ if (ret != 0) {
+ goto fail1;
+ }
+
+ return 0;
+
+fail1:
+ *execbuffer = NULL;
+
+ return ret;
+}
+
+int vigs_execbuffer_create_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct vigs_device *vigs_dev = drm_dev->dev_private;
+ struct drm_vigs_create_execbuffer *args = data;
+ struct vigs_execbuffer *execbuffer = NULL;
+ uint32_t handle;
+ int ret;
+
+ ret = vigs_execbuffer_create(vigs_dev,
+ args->size,
+ false,
+ &execbuffer);
+
+ if (ret != 0) {
+ return ret;
+ }
+
+ ret = drm_gem_handle_create(file_priv,
+ &execbuffer->gem.base,
+ &handle);
+
+ drm_gem_object_unreference_unlocked(&execbuffer->gem.base);
+
+ if (ret == 0) {
+ args->handle = handle;
+ }
+
+ return ret;
+}
--- /dev/null
+#ifndef _VIGS_EXECBUFFER_H_
+#define _VIGS_EXECBUFFER_H_
+
+#include "drmP.h"
+#include "vigs_gem.h"
+
+struct vigs_execbuffer
+{
+ /*
+ * Must be first member! vigs_gem_bo_destroy kfree's the embedded
+ * vigs_gem_object, so it must also be the start of this struct.
+ */
+ struct vigs_gem_object gem;
+};
+
+static inline struct vigs_execbuffer *vigs_gem_to_vigs_execbuffer(struct vigs_gem_object *vigs_gem)
+{
+ return container_of(vigs_gem, struct vigs_execbuffer, gem);
+}
+
+int vigs_execbuffer_create(struct vigs_device *vigs_dev,
+ unsigned long size,
+ bool kernel,
+ struct vigs_execbuffer **execbuffer);
+
+/*
+ * IOCTLs
+ * @{
+ */
+
+int vigs_execbuffer_create_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
+
+/*
+ * @}
+ */
+
+#endif
#include "vigs_fbdev.h"
#include "vigs_device.h"
-#include "vigs_gem.h"
-#include "vigs_buffer.h"
+#include "vigs_surface.h"
#include "vigs_framebuffer.h"
#include "vigs_output.h"
#include "drm_crtc_helper.h"
struct drm_fb_helper_surface_size *sizes)
{
struct vigs_device *vigs_dev = helper->dev->dev_private;
- struct vigs_gem_object *fb_gem;
+ struct vigs_surface *fb_sfc;
struct vigs_framebuffer *vigs_fb;
struct fb_info *fbi;
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+ vigsp_surface_format format;
unsigned long offset;
int dpi;
int ret;
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
+ switch (mode_cmd.pixel_format) {
+ case DRM_FORMAT_XRGB8888:
+ format = vigsp_surface_bgrx8888;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ format = vigsp_surface_bgra8888;
+ break;
+ default:
+ DRM_DEBUG_KMS("unsupported pixel format: %u\n", mode_cmd.pixel_format);
+ ret = -EINVAL;
+ goto fail1;
+ }
+
fbi = framebuffer_alloc(0, &vigs_dev->pci_dev->dev);
if (!fbi) {
+ ret = -ENOMEM;
goto fail1;
}
- ret = vigs_gem_create(vigs_dev,
- (mode_cmd.pitches[0] * mode_cmd.height),
- false,
- DRM_VIGS_GEM_DOMAIN_VRAM,
- &fb_gem);
+ ret = vigs_surface_create(vigs_dev,
+ mode_cmd.width,
+ mode_cmd.height,
+ mode_cmd.pitches[0],
+ format,
+ &fb_sfc);
if (ret != 0) {
goto fail2;
ret = vigs_framebuffer_create(vigs_dev,
&mode_cmd,
- fb_gem,
+ fb_sfc,
&vigs_fb);
- drm_gem_object_unreference_unlocked(&fb_gem->base);
+ drm_gem_object_unreference_unlocked(&fb_sfc->gem.base);
if (ret != 0) {
goto fail2;
goto fail3;
}
- ret = vigs_buffer_kmap(fb_gem->bo);
+ ret = vigs_framebuffer_pin(vigs_fb);
+
+ if (ret != 0) {
+ goto fail4;
+ }
+
+ vigs_gem_reserve(&fb_sfc->gem);
+
+ ret = vigs_gem_kmap(&fb_sfc->gem);
if (ret != 0) {
+ vigs_gem_unreserve(&fb_sfc->gem);
DRM_ERROR("unable to kmap framebuffer GEM\n");
goto fail4;
}
+ vigs_gem_unreserve(&fb_sfc->gem);
+
strcpy(fbi->fix.id, "VIGS");
drm_fb_helper_fill_fix(fbi, vigs_fb->base.pitches[0], vigs_fb->base.depth);
* TODO: "vram_base + ..." - not nice, make a function for this.
*/
fbi->fix.smem_start = vigs_dev->vram_base +
- vigs_buffer_offset(fb_gem->bo) +
+ vigs_gem_offset(&fb_sfc->gem) +
offset;
- fbi->screen_base = fb_gem->bo->kptr + offset;
+ fbi->screen_base = fb_sfc->gem.kptr + offset;
fbi->screen_size = fbi->fix.smem_len = vigs_fb->base.width *
vigs_fb->base.height *
(vigs_fb->base.bits_per_pixel >> 3);
DRM_DEBUG_KMS("enter\n");
/*
- * With !helper->fb, it means that this funcion is called first time
+ * With !helper->fb, it means that this function is called first time
* and after that, the helper->fb would be used as clone mode.
*/
#include "vigs_framebuffer.h"
#include "vigs_device.h"
-#include "vigs_gem.h"
+#include "vigs_surface.h"
#include "vigs_fbdev.h"
#include "vigs_comm.h"
#include "drm_crtc_helper.h"
{
struct vigs_device *vigs_dev = drm_dev->dev_private;
struct drm_gem_object *gem;
+ struct vigs_gem_object *vigs_gem;
+ struct vigs_surface *vigs_sfc;
struct vigs_framebuffer *vigs_fb;
int ret;
return NULL;
}
+ vigs_gem = gem_to_vigs_gem(gem);
+
+ if (vigs_gem->type != VIGS_GEM_TYPE_SURFACE) {
+ DRM_ERROR("GEM is not a surface, handle = %u\n", mode_cmd->handles[0]);
+ drm_gem_object_unreference_unlocked(gem);
+ return NULL;
+ }
+
+ vigs_sfc = vigs_gem_to_vigs_surface(vigs_gem);
+
ret = vigs_framebuffer_create(vigs_dev,
mode_cmd,
- gem_to_vigs_gem(gem),
+ vigs_sfc,
&vigs_fb);
drm_gem_object_unreference_unlocked(gem);
drm_framebuffer_cleanup(fb);
/*
- * Here we can issue surface destroy command, since it's no longer
- * root surface, but it still exists on host.
- */
-
- vigs_comm_destroy_surface(vigs_fb->comm, vigs_fb->sfc_id);
-
- /*
* And we can finally free the GEM.
*/
- drm_gem_object_unreference_unlocked(&vigs_fb->fb_gem->base);
+ drm_gem_object_unreference_unlocked(&vigs_fb->fb_sfc->gem.base);
kfree(vigs_fb);
}
DRM_DEBUG_KMS("enter\n");
- return drm_gem_handle_create(file_priv, &vigs_fb->fb_gem->base, handle);
+ return drm_gem_handle_create(file_priv, &vigs_fb->fb_sfc->gem.base, handle);
}
static struct drm_mode_config_funcs vigs_mode_config_funcs =
int vigs_framebuffer_create(struct vigs_device *vigs_dev,
struct drm_mode_fb_cmd2 *mode_cmd,
- struct vigs_gem_object *fb_gem,
+ struct vigs_surface *fb_sfc,
struct vigs_framebuffer **vigs_fb)
{
int ret = 0;
goto fail1;
}
- switch (mode_cmd->pixel_format) {
- case DRM_FORMAT_XRGB8888:
- (*vigs_fb)->format = vigsp_surface_bgrx8888;
- break;
- case DRM_FORMAT_ARGB8888:
- (*vigs_fb)->format = vigsp_surface_bgra8888;
- break;
- default:
- DRM_DEBUG_KMS("unsupported pixel format: %u\n", mode_cmd->pixel_format);
+ if ((fb_sfc->width != mode_cmd->width) ||
+ (fb_sfc->height != mode_cmd->height) ||
+ (fb_sfc->stride != mode_cmd->pitches[0])) {
+ DRM_DEBUG_KMS("surface format mismatch\n");
ret = -EINVAL;
goto fail2;
}
- ret = vigs_comm_create_surface(vigs_dev->comm,
- mode_cmd->width,
- mode_cmd->height,
- mode_cmd->pitches[0],
- (*vigs_fb)->format,
- fb_gem,
- &(*vigs_fb)->sfc_id);
-
- if (ret != 0) {
- goto fail2;
- }
-
(*vigs_fb)->comm = vigs_dev->comm;
- (*vigs_fb)->fb_gem = fb_gem;
+ (*vigs_fb)->fb_sfc = fb_sfc;
ret = drm_framebuffer_init(vigs_dev->drm_dev,
&(*vigs_fb)->base,
&vigs_framebuffer_funcs);
if (ret != 0) {
- goto fail3;
+ goto fail2;
}
drm_helper_mode_fill_fb_struct(&(*vigs_fb)->base, mode_cmd);
- drm_gem_object_reference(&fb_gem->base);
+ drm_gem_object_reference(&fb_sfc->gem.base);
return 0;
-fail3:
- vigs_comm_destroy_surface(vigs_dev->comm, (*vigs_fb)->sfc_id);
fail2:
kfree(*vigs_fb);
fail1:
return ret;
}
-int vigs_framebuffer_info_ioctl(struct drm_device *drm_dev,
- void *data,
- struct drm_file *file_priv)
+int vigs_framebuffer_pin(struct vigs_framebuffer *vigs_fb)
{
- struct drm_vigs_fb_info *args = data;
- struct drm_mode_object *obj;
- struct drm_framebuffer *fb;
- struct vigs_framebuffer *vigs_fb;
+ int ret;
- mutex_lock(&drm_dev->mode_config.mutex);
+ vigs_gem_reserve(&vigs_fb->fb_sfc->gem);
- obj = drm_mode_object_find(drm_dev, args->fb_id, DRM_MODE_OBJECT_FB);
+ ret = vigs_gem_pin(&vigs_fb->fb_sfc->gem);
- if (!obj) {
- mutex_unlock(&drm_dev->mode_config.mutex);
- return -ENOENT;
+ if (ret != 0) {
+ vigs_gem_unreserve(&vigs_fb->fb_sfc->gem);
+ return ret;
}
- fb = obj_to_fb(obj);
- vigs_fb = fb_to_vigs_fb(fb);
+ vigs_gem_unreserve(&vigs_fb->fb_sfc->gem);
- args->sfc_id = vigs_fb->sfc_id;
+ return 0;
+}
- mutex_unlock(&drm_dev->mode_config.mutex);
+void vigs_framebuffer_unpin(struct vigs_framebuffer *vigs_fb)
+{
+ vigs_gem_reserve(&vigs_fb->fb_sfc->gem);
- return 0;
+ vigs_gem_unpin(&vigs_fb->fb_sfc->gem);
+
+ vigs_gem_unreserve(&vigs_fb->fb_sfc->gem);
}
struct vigs_device;
struct vigs_comm;
-struct vigs_gem_object;
+struct vigs_surface;
struct vigs_framebuffer
{
*/
struct vigs_comm *comm;
- vigsp_surface_format format;
-
- struct vigs_gem_object *fb_gem;
-
- /*
- * Each DRM framebuffer has a surface on host, this is
- * its id.
- */
- vigsp_surface_id sfc_id;
+ struct vigs_surface *fb_sfc;
};
static inline struct vigs_framebuffer *fb_to_vigs_fb(struct drm_framebuffer *fb)
*/
int vigs_framebuffer_create(struct vigs_device *vigs_dev,
struct drm_mode_fb_cmd2 *mode_cmd,
- struct vigs_gem_object *fb_gem,
+ struct vigs_surface *fb_sfc,
struct vigs_framebuffer **vigs_fb);
-/*
- * IOCTLs
- * @{
- */
-
-int vigs_framebuffer_info_ioctl(struct drm_device *drm_dev,
- void *data,
- struct drm_file *file_priv);
-
-/*
- * @}
- */
+int vigs_framebuffer_pin(struct vigs_framebuffer *vigs_fb);
+void vigs_framebuffer_unpin(struct vigs_framebuffer *vigs_fb);
#endif
#include "vigs_gem.h"
-#include "vigs_buffer.h"
#include "vigs_device.h"
+#include "vigs_mman.h"
+#include "vigs_surface.h"
#include <drm/vigs_drm.h>
+#include <ttm/ttm_placement.h>
-int vigs_gem_create(struct vigs_device *vigs_dev,
- unsigned long size,
- bool kernel,
- u32 domain,
- struct vigs_gem_object **vigs_gem)
+static void vigs_gem_bo_destroy(struct ttm_buffer_object *bo)
{
+ struct vigs_gem_object *vigs_gem = bo_to_vigs_gem(bo);
+
+ kfree(vigs_gem);
+}
+
+int vigs_gem_init(struct vigs_gem_object *vigs_gem,
+ struct vigs_device *vigs_dev,
+ enum ttm_object_type type,
+ unsigned long size,
+ bool kernel,
+ vigs_gem_destroy_func destroy)
+{
+ u32 placements[1];
+ struct ttm_placement placement;
+ enum ttm_bo_type bo_type;
int ret = 0;
size = roundup(size, PAGE_SIZE);
if (size == 0) {
+ kfree(vigs_gem);
return -EINVAL;
}
- *vigs_gem = kzalloc(sizeof(**vigs_gem), GFP_KERNEL);
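+ /*
+ * Initial placement: surfaces start host-side ("GPU", TTM_PL_TT) and
+ * are pinned into VRAM on demand; execbuffers always live in RAM
+ * (TTM_PL_PRIV0).
+ */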
+ if (type == VIGS_GEM_TYPE_SURFACE) {
+ placements[0] =
+ TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT | TTM_PL_FLAG_NO_EVICT;
+ } else if (type == VIGS_GEM_TYPE_EXECBUFFER) {
+ placements[0] =
+ TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | TTM_PL_FLAG_NO_EVICT;
+ } else {
+ kfree(vigs_gem);
+ return -EINVAL;
+ }
+
+ memset(&placement, 0, sizeof(placement));
+
+ placement.placement = placements;
+ placement.busy_placement = placements;
+ placement.num_placement = 1;
+ placement.num_busy_placement = 1;
+
+ if (kernel) {
+ bo_type = ttm_bo_type_kernel;
+ } else {
+ bo_type = ttm_bo_type_device;
+ }
+
+ if (unlikely(vigs_dev->mman->bo_dev.dev_mapping == NULL)) {
+ vigs_dev->mman->bo_dev.dev_mapping = vigs_dev->drm_dev->dev_mapping;
+ }
+
+ ret = ttm_bo_init(&vigs_dev->mman->bo_dev, &vigs_gem->bo, size, bo_type,
+ &placement, 0, 0,
+ false, NULL, size,
+ &vigs_gem_bo_destroy);
- if (!*vigs_gem) {
- ret = -ENOMEM;
- goto fail1;
+ if (ret != 0) {
+ return ret;
}
- ret = vigs_buffer_create(vigs_dev->mman,
- size,
- kernel,
- domain,
- &(*vigs_gem)->bo);
+ vigs_gem->type = type;
+ vigs_gem->pin_count = 0;
+ vigs_gem->destroy = destroy;
+
+ ret = drm_gem_object_init(vigs_dev->drm_dev, &vigs_gem->base, size);
if (ret != 0) {
- goto fail2;
+ struct ttm_buffer_object *bo = &vigs_gem->bo;
+ ttm_bo_unref(&bo);
+ return ret;
+ }
+
+ DRM_DEBUG_DRIVER("GEM created (type = %u, off = 0x%llX, sz = %lu)\n",
+ type,
+ vigs_gem_mmap_offset(vigs_gem),
+ vigs_gem_size(vigs_gem));
+
+ return 0;
+}
+
+void vigs_gem_cleanup(struct vigs_gem_object *vigs_gem)
+{
+ struct ttm_buffer_object *bo = &vigs_gem->bo;
+
+ vigs_gem_reserve(vigs_gem);
+
+ vigs_gem_kunmap(vigs_gem);
+
+ vigs_gem_unreserve(vigs_gem);
+
+ DRM_DEBUG_DRIVER("GEM destroyed (type = %u, off = 0x%llX, sz = %lu)\n",
+ vigs_gem->type,
+ vigs_gem_mmap_offset(vigs_gem),
+ vigs_gem_size(vigs_gem));
+
+ drm_gem_object_release(&vigs_gem->base);
+ ttm_bo_unref(&bo);
+}
+
+int vigs_gem_pin(struct vigs_gem_object *vigs_gem)
+{
+ u32 placements[1];
+ struct ttm_placement placement;
+ int ret;
+
+ if (vigs_gem->pin_count) {
+ ++vigs_gem->pin_count;
+ return 0;
+ }
+
+ if (vigs_gem->type == VIGS_GEM_TYPE_EXECBUFFER) {
+ return 0;
}
- ret = drm_gem_object_init(vigs_dev->drm_dev, &(*vigs_gem)->base, size);
+ placements[0] =
+ TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | TTM_PL_FLAG_NO_EVICT;
+
+ memset(&placement, 0, sizeof(placement));
+
+ placement.placement = placements;
+ placement.busy_placement = placements;
+ placement.num_placement = 1;
+ placement.num_busy_placement = 1;
+
+ ret = ttm_bo_validate(&vigs_gem->bo, &placement, false, false, false);
if (ret != 0) {
- goto fail3;
+ DRM_ERROR("GEM pin failed (type = %u, off = 0x%llX, sz = %lu)\n",
+ vigs_gem->type,
+ vigs_gem_mmap_offset(vigs_gem),
+ vigs_gem_size(vigs_gem));
+ return ret;
}
+ vigs_gem->pin_count = 1;
+
+ DRM_DEBUG_DRIVER("GEM pinned (type = %u, off = 0x%llX, sz = %lu)\n",
+ vigs_gem->type,
+ vigs_gem_mmap_offset(vigs_gem),
+ vigs_gem_size(vigs_gem));
+
return 0;
+}
+
+void vigs_gem_unpin(struct vigs_gem_object *vigs_gem)
+{
+ u32 placements[2];
+ struct ttm_placement placement;
+ int ret;
+
+ /*
+ * Pinning is a no-op for execbuffers, so there's no count to drop;
+ * bail out before the BUG_ON below.
+ */
+ if (vigs_gem->type == VIGS_GEM_TYPE_EXECBUFFER) {
+ return;
+ }
+
+ BUG_ON(vigs_gem->pin_count == 0);
+
+ if (--vigs_gem->pin_count > 0) {
+ return;
+ }
+
+ vigs_gem_kunmap(vigs_gem);
+
+ placements[0] =
+ TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM;
+ placements[1] =
+ TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT | TTM_PL_FLAG_NO_EVICT;
-fail3:
- vigs_buffer_release((*vigs_gem)->bo);
-fail2:
- kfree(*vigs_gem);
-fail1:
- *vigs_gem = NULL;
+ memset(&placement, 0, sizeof(placement));
- return ret;
+ placement.placement = placements;
+ placement.busy_placement = placements;
+ placement.num_placement = 2;
+ placement.num_busy_placement = 2;
+
+ ret = ttm_bo_validate(&vigs_gem->bo, &placement, false, false, false);
+
+ if (ret != 0) {
+ DRM_ERROR("GEM unpin failed (type = %u, off = 0x%llX, sz = %lu)\n",
+ vigs_gem->type,
+ vigs_gem_mmap_offset(vigs_gem),
+ vigs_gem_size(vigs_gem));
+ } else {
+ DRM_DEBUG_DRIVER("GEM unpinned (type = %u, off = 0x%llX, sz = %lu)\n",
+ vigs_gem->type,
+ vigs_gem_mmap_offset(vigs_gem),
+ vigs_gem_size(vigs_gem));
+ }
+}
+
+int vigs_gem_kmap(struct vigs_gem_object *vigs_gem)
+{
+ bool is_iomem;
+ int ret;
+
+ BUG_ON((vigs_gem->type == VIGS_GEM_TYPE_SURFACE) &&
+ (vigs_gem->pin_count == 0));
+
+ if (vigs_gem->kptr) {
+ return 0;
+ }
+
+ ret = ttm_bo_kmap(&vigs_gem->bo,
+ 0,
+ vigs_gem->bo.num_pages,
+ &vigs_gem->kmap);
+
+ if (ret != 0) {
+ return ret;
+ }
+
+ vigs_gem->kptr = ttm_kmap_obj_virtual(&vigs_gem->kmap, &is_iomem);
+
+ DRM_DEBUG_DRIVER("GEM (type = %u, off = 0x%llX, sz = %lu) mapped to 0x%p\n",
+ vigs_gem->type,
+ vigs_gem_mmap_offset(vigs_gem),
+ vigs_gem_size(vigs_gem),
+ vigs_gem->kptr);
+
+ return 0;
+}
+
+void vigs_gem_kunmap(struct vigs_gem_object *vigs_gem)
+{
+ if (vigs_gem->kptr == NULL) {
+ return;
+ }
+
+ vigs_gem->kptr = NULL;
+
+ ttm_bo_kunmap(&vigs_gem->kmap);
+
+ DRM_DEBUG_DRIVER("GEM (type = %u, off = 0x%llX, sz = %lu) unmapped\n",
+ vigs_gem->type,
+ vigs_gem_mmap_offset(vigs_gem),
+ vigs_gem_size(vigs_gem));
}
void vigs_gem_free_object(struct drm_gem_object *gem)
{
struct vigs_gem_object *vigs_gem = gem_to_vigs_gem(gem);
- vigs_buffer_release(vigs_gem->bo);
-
- kfree(vigs_gem);
+ vigs_gem->destroy(vigs_gem);
}
int vigs_gem_init_object(struct drm_gem_object *gem)
struct drm_mode_create_dumb *args)
{
struct vigs_device *vigs_dev = drm_dev->dev_private;
- struct vigs_gem_object *vigs_gem = NULL;
+ struct vigs_surface *sfc = NULL;
uint32_t handle;
int ret;
+ if (args->bpp != 32) {
+ DRM_ERROR("Only 32 bpp surfaces are supported for now\n");
+ return -EINVAL;
+ }
+
args->pitch = args->width * ((args->bpp + 7) / 8);
- args->size = args->pitch * args->height;
- args->size = ALIGN(args->size, PAGE_SIZE);
- ret = vigs_gem_create(vigs_dev,
- args->size,
- false,
- DRM_VIGS_GEM_DOMAIN_VRAM,
- &vigs_gem);
+ ret = vigs_surface_create(vigs_dev,
+ args->width,
+ args->height,
+ args->pitch,
+ vigsp_surface_bgrx8888,
+ &sfc);
if (ret != 0) {
return ret;
}
+ args->size = vigs_gem_size(&sfc->gem);
+
ret = drm_gem_handle_create(file_priv,
- &vigs_gem->base,
+ &sfc->gem.base,
&handle);
- drm_gem_object_unreference_unlocked(&vigs_gem->base);
+ drm_gem_object_unreference_unlocked(&sfc->gem.base);
if (ret == 0) {
args->handle = handle;
}
- DRM_DEBUG_DRIVER("GEM %u created\n", handle);
-
return 0;
}
struct drm_device *drm_dev,
uint32_t handle)
{
- DRM_DEBUG_DRIVER("destroying GEM %u\n", handle);
-
return drm_gem_handle_delete(file_priv, handle);
}
vigs_gem = gem_to_vigs_gem(gem);
- *offset_p = vigs_buffer_mmap_offset(vigs_gem->bo);
-
- drm_gem_object_unreference_unlocked(gem);
-
- return 0;
-}
-
-int vigs_gem_create_ioctl(struct drm_device *drm_dev,
- void *data,
- struct drm_file *file_priv)
-{
- struct vigs_device *vigs_dev = drm_dev->dev_private;
- struct drm_vigs_gem_create *args = data;
- struct vigs_gem_object *vigs_gem = NULL;
- uint32_t handle;
- int ret;
-
- ret = vigs_gem_create(vigs_dev,
- args->size,
- false,
- args->domain,
- &vigs_gem);
-
- if (ret != 0) {
- return ret;
- }
-
- ret = drm_gem_handle_create(file_priv,
- &vigs_gem->base,
- &handle);
-
- drm_gem_object_unreference_unlocked(&vigs_gem->base);
-
- if (ret == 0) {
- args->size = vigs_buffer_size(vigs_gem->bo);
- args->handle = handle;
- args->domain_offset = vigs_buffer_offset(vigs_gem->bo);
- DRM_DEBUG_DRIVER("GEM %u created\n", handle);
- }
-
- return ret;
-}
-
-int vigs_gem_mmap_ioctl(struct drm_device *drm_dev,
- void *data,
- struct drm_file *file_priv)
-{
- struct drm_vigs_gem_mmap *args = data;
- struct drm_gem_object *gem;
- struct vigs_gem_object *vigs_gem;
-
- gem = drm_gem_object_lookup(drm_dev, file_priv, args->handle);
-
- if (gem == NULL) {
- return -ENOENT;
- }
-
- vigs_gem = gem_to_vigs_gem(gem);
-
- args->offset = vigs_buffer_mmap_offset(vigs_gem->bo);
-
- drm_gem_object_unreference_unlocked(gem);
-
- return 0;
-}
-
-int vigs_gem_info_ioctl(struct drm_device *drm_dev,
- void *data,
- struct drm_file *file_priv)
-{
- struct drm_vigs_gem_info *args = data;
- struct drm_gem_object *gem;
- struct vigs_gem_object *vigs_gem;
-
- gem = drm_gem_object_lookup(drm_dev, file_priv, args->handle);
-
- if (gem == NULL) {
- return -ENOENT;
- }
-
- vigs_gem = gem_to_vigs_gem(gem);
-
- args->domain = vigs_gem->bo->domain;
- args->domain_offset = vigs_buffer_offset(vigs_gem->bo);
+ *offset_p = vigs_gem_mmap_offset(vigs_gem);
drm_gem_object_unreference_unlocked(gem);
#define _VIGS_GEM_H_
#include "drmP.h"
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_object.h>
+
+#define VIGS_GEM_TYPE_SURFACE ttm_driver_type0
+#define VIGS_GEM_TYPE_EXECBUFFER ttm_driver_type1
struct vigs_device;
-struct vigs_buffer_object;
+struct vigs_gem_object;
+
+typedef void (*vigs_gem_destroy_func)(struct vigs_gem_object *vigs_gem);
struct vigs_gem_object
{
struct drm_gem_object base;
- struct vigs_buffer_object *bo;
+ struct ttm_buffer_object bo;
+
+ enum ttm_object_type type;
+
+ /*
+ * Valid only after successful call to 'vigs_gem_kmap'.
+ * @{
+ */
+
+ struct ttm_bo_kmap_obj kmap;
+ void *kptr; /* Kernel pointer to buffer data. */
+
+ /*
+ * @}
+ */
+
+ volatile unsigned pin_count;
+
+ vigs_gem_destroy_func destroy;
};
static inline struct vigs_gem_object *gem_to_vigs_gem(struct drm_gem_object *gem)
return container_of(gem, struct vigs_gem_object, base);
}
+static inline struct vigs_gem_object *bo_to_vigs_gem(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct vigs_gem_object, bo);
+}
+
+/*
+ * Initializes a gem object. 'size' is automatically rounded up to page size.
+ * 'vigs_gem' is kfree'd on failure.
+ */
+int vigs_gem_init(struct vigs_gem_object *vigs_gem,
+ struct vigs_device *vigs_dev,
+ enum ttm_object_type type,
+ unsigned long size,
+ bool kernel,
+ vigs_gem_destroy_func destroy);
+
+void vigs_gem_cleanup(struct vigs_gem_object *vigs_gem);
+
+/*
+ * Buffer size.
+ */
+static inline unsigned long vigs_gem_size(struct vigs_gem_object *vigs_gem)
+{
+ return vigs_gem->bo.num_pages << PAGE_SHIFT;
+}
+
+/*
+ * GEM offset within its placement. For execbuffers it's always the same,
+ * since they never move; for surfaces it's only valid while the GEM is pinned.
+ */
+static inline unsigned long vigs_gem_offset(struct vigs_gem_object *vigs_gem)
+{
+ return vigs_gem->bo.offset;
+}
+
+/*
+ * GEM offset relative to DRM_FILE_OFFSET. For kernel buffers it's always 0.
+ */
+static inline u64 vigs_gem_mmap_offset(struct vigs_gem_object *vigs_gem)
+{
+ return vigs_gem->bo.addr_space_offset;
+}
+
+static inline void vigs_gem_reserve(struct vigs_gem_object *vigs_gem)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(&vigs_gem->bo, false, false, false, 0);
+
+ BUG_ON(ret != 0);
+}
+
+static inline void vigs_gem_unreserve(struct vigs_gem_object *vigs_gem)
+{
+ ttm_bo_unreserve(&vigs_gem->bo);
+}
+
/*
- * Creates a gem object. 'size' is automatically rounded up to page size.
+ * Functions below MUST be called between
+ * vigs_gem_reserve/vigs_gem_unreserve.
+ * @{
+ */
+
+/*
+ * Pin/unpin GEM. For execbuffers this is a no-op, since they're always
+ * in RAM placement. For surfaces this pins the GEM into VRAM. The
+ * operation can fail if there's no room in VRAM and all GEMs currently
+ * in VRAM are pinned.
+ * @{
+ */
+int vigs_gem_pin(struct vigs_gem_object *vigs_gem);
+void vigs_gem_unpin(struct vigs_gem_object *vigs_gem);
+/*
+ * @}
+ */
+
+/*
+ * Surface GEMs must be pinned before calling these.
+ * @{
+ */
+int vigs_gem_kmap(struct vigs_gem_object *vigs_gem);
+void vigs_gem_kunmap(struct vigs_gem_object *vigs_gem);
+/*
+ * @}
+ */
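+
+/*
+ * A minimal usage sketch (this is what vigs_framebuffer_pin and the
+ * fbdev probe code effectively do; error handling elided):
+ *
+ *   vigs_gem_reserve(gem);
+ *   ret = vigs_gem_pin(gem);      (surfaces are moved into VRAM)
+ *   if (ret == 0)
+ *       ret = vigs_gem_kmap(gem); ('gem->kptr' is valid from here on)
+ *   vigs_gem_unreserve(gem);
+ */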
+
+/*
+ * @}
+ */
+
+/*
+ * Driver hooks.
+ * @{
*/
-int vigs_gem_create(struct vigs_device *vigs_dev,
- unsigned long size,
- bool kernel,
- u32 domain,
- struct vigs_gem_object **vigs_gem);
void vigs_gem_free_object(struct drm_gem_object *gem);
struct drm_file *file_priv);
/*
+ * @}
+ */
+
+/*
* Dumb
* @{
*/
* @}
*/
-/*
- * IOCTLs
- * @{
- */
-
-int vigs_gem_create_ioctl(struct drm_device *drm_dev,
- void *data,
- struct drm_file *file_priv);
-
-int vigs_gem_mmap_ioctl(struct drm_device *drm_dev,
- void *data,
- struct drm_file *file_priv);
-
-int vigs_gem_info_ioctl(struct drm_device *drm_dev,
- void *data,
- struct drm_file *file_priv);
-
-/*
- * @}
- */
-
#endif
#include "vigs_mman.h"
+#include "vigs_gem.h"
#include <ttm/ttm_placement.h>
/*
- * This is TTM-based memory manager for VIGS, it supports 3 memory placements:
- * CPU - This is for target-only memory, not shared with host.
- * VRAM - This gets allocated on "VRAM" PCI BAR, shared with host, typically
- * used for surface placement.
- * RAM - This gets allocated on "RAM" PCI BAR, shared with host, typically
- * used for protocol commands placement.
+ * This is a TTM-based memory manager for VIGS. It supports 4 memory placements:
+ * CPU - Target-only memory, not shared with host. Required by TTM,
+ * not actually used.
+ * GPU - This is host-only memory, not shared with target.
+ * VRAM - This gets allocated on "VRAM" PCI BAR, shared with host, used
+ * for surface placement.
+ * RAM - This gets allocated on "RAM" PCI BAR, shared with host, used for
+ * execbuffer placement.
*
- * No eviction supported yet, so buffers cannot be moved between placements.
+ * Eviction is supported, so buffers can be moved between some placements.
+ * Allowed movements:
+ * VRAM -> GPU
+ * GPU -> VRAM
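+ *
+ * In TTM terms (as used below): GPU is TTM_PL_TT, VRAM is TTM_PL_VRAM
+ * and RAM is TTM_PL_PRIV0.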
*/
/*
static int vigs_ttm_backend_bind(struct ttm_tt *tt,
struct ttm_mem_reg *bo_mem)
{
- DRM_ERROR("not implemented");
-
- return -1;
+ return 0;
}
static int vigs_ttm_backend_unbind(struct ttm_tt *tt)
{
- DRM_ERROR("not implemented");
-
- return -1;
+ return 0;
}
static void vigs_ttm_backend_destroy(struct ttm_tt *tt)
{
struct ttm_dma_tt *dma_tt = (void*)tt;
- ttm_dma_tt_fini(dma_tt);
kfree(dma_tt);
}
struct page *dummy_read_page)
{
struct ttm_dma_tt *dma_tt;
- int ret;
dma_tt = kzalloc(sizeof(struct ttm_dma_tt), GFP_KERNEL);
dma_tt->ttm.func = &vigs_ttm_backend_func;
- ret = ttm_dma_tt_init(dma_tt, bo_dev, size, page_flags,
- dummy_read_page);
+ return &dma_tt->ttm;
+}
- if (ret != 0) {
- DRM_ERROR("ttm_dma_tt_init failed: %d\n", ret);
- kfree(dma_tt);
- return NULL;
- }
+static int vigs_ttm_tt_populate(struct ttm_tt *tt)
+{
+ return 0;
+}
- return &dma_tt->ttm;
+static void vigs_ttm_tt_unpopulate(struct ttm_tt *tt)
+{
}
/*
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
break;
+ case TTM_PL_TT:
+ man->func = &ttm_bo_manager_func;
+ man->gpu_offset = 0;
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
+ TTM_MEMTYPE_FLAG_CMA;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
case TTM_PL_VRAM:
case TTM_PL_PRIV0:
- /*
- * For now we don't handle OOMs, i.e. if user mode
- * will allocate too many pixmaps then kernel will complain and
- * everything will break. Later we'll implement our own
- * ttm_mem_type_manager_func and handle OOMs.
- */
man->func = &ttm_bo_manager_func;
man->gpu_offset = 0;
man->flags = TTM_MEMTYPE_FLAG_FIXED |
return 0;
}
+static const u32 evict_placements[1] =
+{
+ TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT | TTM_PL_FLAG_NO_EVICT
+};
+
+static const struct ttm_placement evict_placement =
+{
+ .fpfn = 0,
+ .lpfn = 0,
+ .num_placement = ARRAY_SIZE(evict_placements),
+ .placement = evict_placements,
+ .num_busy_placement = ARRAY_SIZE(evict_placements),
+ .busy_placement = evict_placements
+};
+
+static void vigs_ttm_evict_flags(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
+{
+ BUG_ON(bo->mem.mem_type != TTM_PL_VRAM);
+
+ *placement = evict_placement;
+}
+
+static int vigs_ttm_move(struct ttm_buffer_object *bo,
+ bool evict,
+ bool interruptible,
+ bool no_wait_reserve,
+ bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
+{
+ struct ttm_mem_reg *old_mem = &bo->mem;
+
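+ /*
+ * VRAM <-> GPU moves never copy data on the target side: surface
+ * contents live on the host, so only the mm nodes are swapped here.
+ */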
+ if ((old_mem->mem_type == TTM_PL_VRAM) &&
+ (new_mem->mem_type == TTM_PL_TT)) {
+ DRM_DEBUG_DRIVER("ttm_move: 0x%llX vram -> gpu\n", bo->addr_space_offset);
+
+ ttm_bo_mem_put(bo, old_mem);
+
+ *old_mem = *new_mem;
+ new_mem->mm_node = NULL;
+
+ return 0;
+ } else if ((old_mem->mem_type == TTM_PL_TT) &&
+ (new_mem->mem_type == TTM_PL_VRAM)) {
+ DRM_DEBUG_DRIVER("ttm_move: 0x%llX gpu -> vram\n", bo->addr_space_offset);
+
+ ttm_bo_mem_put(bo, old_mem);
+
+ *old_mem = *new_mem;
+ new_mem->mm_node = NULL;
+
+ return 0;
+ } else {
+ return ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ }
+}
+
static int vigs_ttm_verify_access(struct ttm_buffer_object *bo,
struct file *filp)
{
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
- return 0;
+ case TTM_PL_TT:
+ break;
case TTM_PL_VRAM:
- DRM_DEBUG_DRIVER("VRAM reservation\n");
mem->bus.is_iomem = true;
mem->bus.base = mman->vram_base;
mem->bus.offset = mem->start << PAGE_SHIFT;
break;
case TTM_PL_PRIV0:
- DRM_DEBUG_DRIVER("RAM reservation\n");
mem->bus.is_iomem = true;
mem->bus.base = mman->ram_base;
mem->bus.offset = mem->start << PAGE_SHIFT;
static struct ttm_bo_driver vigs_ttm_bo_driver =
{
- .ttm_tt_create = &vigs_ttm_tt_create, /* Only needed for ttm_bo_type_kernel */
+ .ttm_tt_create = &vigs_ttm_tt_create, /* Needed for ttm_bo_type_kernel and TTM_PL_TT */
+ .ttm_tt_populate = &vigs_ttm_tt_populate, /* Needed for TTM_PL_TT */
+ .ttm_tt_unpopulate = &vigs_ttm_tt_unpopulate, /* Needed for TTM_PL_TT */
.invalidate_caches = &vigs_ttm_invalidate_caches,
.init_mem_type = &vigs_ttm_init_mem_type,
- /*
- * We don't support eviction right now, this will be supported
- * later, so for now all buffers are always pinned.
- */
- .evict_flags = NULL,
+ .evict_flags = &vigs_ttm_evict_flags,
+ .move = &vigs_ttm_move,
.verify_access = &vigs_ttm_verify_access,
.io_mem_reserve = &vigs_ttm_io_mem_reserve,
.io_mem_free = &vigs_ttm_io_mem_free,
};
-static struct vm_operations_struct vigs_ttm_vm_ops;
-static const struct vm_operations_struct *ttm_vm_ops = NULL;
-
-static int vigs_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct ttm_buffer_object *bo;
-
- bo = (struct ttm_buffer_object*)vma->vm_private_data;
-
- if (bo == NULL) {
- return VM_FAULT_NOPAGE;
- }
-
- return ttm_vm_ops->fault(vma, vmf);
-}
-
int vigs_mman_create(resource_size_t vram_base,
resource_size_t vram_size,
resource_size_t ram_base,
}
/*
+ * Init GPU
+ * @{
+ */
+
+ /*
+ * For GPU we're only limited by host resources, let the target create
+ * as many buffers as it likes.
+ */
+ ret = ttm_bo_init_mm(&(*mman)->bo_dev,
+ TTM_PL_TT,
+ (0xFFFFFFFFUL / PAGE_SIZE));
+ if (ret != 0) {
+ DRM_ERROR("failed initializing GPU mm\n");
+ goto fail4;
+ }
+
+ /*
+ * @}
+ */
+
+ /*
* Init VRAM
* @{
*/
num_pages);
if (ret != 0) {
DRM_ERROR("failed initializing VRAM mm\n");
- goto fail4;
+ goto fail5;
}
/*
num_pages);
if (ret != 0) {
DRM_ERROR("failed initializing RAM mm\n");
- goto fail5;
+ goto fail6;
}
/*
return 0;
-fail5:
+fail6:
ttm_bo_clean_mm(&(*mman)->bo_dev, TTM_PL_VRAM);
+fail5:
+ ttm_bo_clean_mm(&(*mman)->bo_dev, TTM_PL_TT);
fail4:
ttm_bo_device_release(&(*mman)->bo_dev);
fail3:
ttm_bo_clean_mm(&mman->bo_dev, TTM_PL_PRIV0);
ttm_bo_clean_mm(&mman->bo_dev, TTM_PL_VRAM);
+ ttm_bo_clean_mm(&mman->bo_dev, TTM_PL_TT);
ttm_bo_device_release(&mman->bo_dev);
vigs_mman_global_cleanup(mman);
kfree(mman);
}
+static struct vm_operations_struct vigs_ttm_vm_ops;
+static const struct vm_operations_struct *ttm_vm_ops = NULL;
+
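+/*
+ * A GEM must not move while user mappings of it exist, so every vma
+ * open pins it (surfaces get pinned into VRAM) and every close unpins
+ * it; the initial pin is taken in vigs_mman_mmap below.
+ */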
+static void vigs_ttm_open(struct vm_area_struct *vma)
+{
+ struct ttm_buffer_object *bo = vma->vm_private_data;
+ struct vigs_gem_object *vigs_gem = bo_to_vigs_gem(bo);
+
+ vigs_gem_reserve(vigs_gem);
+
+ vigs_gem_pin(vigs_gem);
+
+ vigs_gem_unreserve(vigs_gem);
+
+ ttm_vm_ops->open(vma);
+}
+
+static void vigs_ttm_close(struct vm_area_struct *vma)
+{
+ struct ttm_buffer_object *bo = vma->vm_private_data;
+ struct vigs_gem_object *vigs_gem = bo_to_vigs_gem(bo);
+
+ vigs_gem_reserve(vigs_gem);
+
+ vigs_gem_unpin(vigs_gem);
+
+ vigs_gem_unreserve(vigs_gem);
+
+ ttm_vm_ops->close(vma);
+}
+
+static int vigs_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct ttm_buffer_object *bo = vma->vm_private_data;
+
+ if (bo == NULL) {
+ return VM_FAULT_NOPAGE;
+ }
+
+ return ttm_vm_ops->fault(vma, vmf);
+}
+
int vigs_mman_mmap(struct vigs_mman *mman,
struct file *filp,
struct vm_area_struct *vma)
{
+ struct ttm_buffer_object *bo;
+ struct vigs_gem_object *vigs_gem;
int ret;
if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
if (unlikely(ttm_vm_ops == NULL)) {
ttm_vm_ops = vma->vm_ops;
vigs_ttm_vm_ops = *ttm_vm_ops;
+ vigs_ttm_vm_ops.open = &vigs_ttm_open;
+ vigs_ttm_vm_ops.close = &vigs_ttm_close;
vigs_ttm_vm_ops.fault = &vigs_ttm_fault;
}
vma->vm_ops = &vigs_ttm_vm_ops;
+ bo = vma->vm_private_data;
+
+ vigs_gem = bo_to_vigs_gem(bo);
+
+ vigs_gem_reserve(vigs_gem);
+
+ ret = vigs_gem_pin(vigs_gem);
+
+ vigs_gem_unreserve(vigs_gem);
+
+ if (ret != 0) {
+ ttm_vm_ops->close(vma);
+ return ret;
+ }
+
return 0;
}
#include "drmP.h"
#include <ttm/ttm_bo_driver.h>
+struct vigs_mman_ops
+{
+ void (*vram_to_gpu)(void *user_data, struct ttm_buffer_object *bo);
+
+ void (*gpu_to_vram)(void *user_data, struct ttm_buffer_object *bo);
+};
+
struct vigs_mman
{
struct drm_global_reference mem_global_ref;
resource_size_t vram_base;
resource_size_t ram_base;
+
+ struct vigs_mman_ops *ops;
+ void *user_data;
};
static inline struct vigs_mman *bo_dev_to_vigs_mman(struct ttm_bo_device *bo_dev)
#define _VIGS_PROTOCOL_H_
/*
- * VIGS protocol is a request-response protocol.
+ * The VIGS protocol is a multiple-request/single-response protocol.
*
- * + Requests come one by one.
- * + The response is written after the request.
+ * + Requests come batched.
+ * + The response is written after the request batch.
+ *
+ * Not all commands can be batched; only commands that don't have
+ * response data can be batched.
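+ *
+ * A batch is laid out in memory roughly as follows (vigs_comm_prepare
+ * builds exactly this for single-request batches):
+ *
+ *   vigsp_cmd_batch_header    (num_requests = N)
+ *   vigsp_cmd_request_header  (request #1, 'size' bytes of data follow)
+ *   ...request #1 data...
+ *   vigsp_cmd_request_header  (request #N)
+ *   ...request #N data...
+ *   vigsp_cmd_response_header (written by the host, if any)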
*/
/*
* Bump this whenever protocol changes.
*/
-#define VIGS_PROTOCOL_VERSION 10
+#define VIGS_PROTOCOL_VERSION 11
typedef signed char vigsp_s8;
typedef signed short vigsp_s16;
typedef unsigned long long vigsp_u64;
typedef vigsp_u32 vigsp_bool;
-typedef vigsp_u32 vigsp_surface_id;
-typedef vigsp_s32 vigsp_offset;
+typedef vigsp_u64 vigsp_surface_id;
+typedef vigsp_u32 vigsp_offset;
typedef vigsp_u32 vigsp_color;
typedef vigsp_u64 vigsp_va;
-typedef vigsp_u32 vigsp_resource_id;
typedef enum
{
vigsp_cmd_create_surface = 0x3,
vigsp_cmd_destroy_surface = 0x4,
vigsp_cmd_set_root_surface = 0x5,
- vigsp_cmd_copy = 0x6,
- vigsp_cmd_solid_fill = 0x7,
- vigsp_cmd_update_vram = 0x8,
- vigsp_cmd_put_image = 0x9,
- vigsp_cmd_get_image = 0xA,
- vigsp_cmd_assign_resource = 0xB,
- vigsp_cmd_destroy_resource = 0xC,
+ vigsp_cmd_update_vram = 0x6,
+ vigsp_cmd_update_gpu = 0x7,
+ vigsp_cmd_copy = 0x8,
+ vigsp_cmd_solid_fill = 0x9,
} vigsp_cmd;
typedef enum
vigsp_surface_bgra8888 = 0x1,
} vigsp_surface_format;
-typedef enum
-{
- vigsp_resource_window = 0x0,
- vigsp_resource_pixmap = 0x1,
-} vigsp_resource_type;
-
#pragma pack(1)
-/*
- * 'vram_offset' is both surface data offset
- * and dirty flag. when it's < 0 it means surface data
- * is not allocated on target or surface is not dirty.
- * When it's >= 0 it means either surface data has been allocated
- * or surface is dirty in case if data has been allocated before.
- */
-struct vigsp_surface
-{
- vigsp_surface_id id;
- vigsp_offset vram_offset;
-};
-
struct vigsp_point
{
vigsp_u32 x;
struct vigsp_size size;
};
+struct vigsp_cmd_batch_header
+{
+ vigsp_u32 num_requests;
+};
+
struct vigsp_cmd_request_header
{
vigsp_cmd cmd;
/*
- * Response offset counting after request header.
+ * Request size starting from request header.
*/
- vigsp_u32 response_offset;
+ vigsp_u32 size;
};
struct vigsp_cmd_response_header
/*
* cmd_create_surface
*
- * Called for each surface created. Server returns 'id' of the surface,
+ * Called for each surface created. Client passes 'id' of the surface,
* all further operations must be carried out using this id. 'id' is
- * unique across whole target system, because there can be only one
- * DRM master (like X.Org) on target and this master typically wants to
- * share the surfaces with other processes.
- *
- * 'vram_offset' points to the surface data in VRAM, if any. If no surface data
- * is provided then 'vram_surface' must be < 0.
+ * unique across the whole target system.
*
* @{
*/
vigsp_u32 height;
vigsp_u32 stride;
vigsp_surface_format format;
- vigsp_offset vram_offset;
-};
-
-struct vigsp_cmd_create_surface_response
-{
vigsp_surface_id id;
};
* cmd_destroy_surface
*
* Destroys the surface identified by 'id'. Surface 'id' may not be used
- * after this call and its data can be assigned to some other surface right
+ * after this call and its id can be assigned to some other surface right
* after this call.
*
* @{
* cmd_set_root_surface
*
* Sets surface identified by 'id' as new root surface. Root surface is the
- * one that's displayed on screen. Root surface must have data.
+ * one that's displayed on screen. Root surface must reside in VRAM
+ * at all times; pass its VRAM 'offset' here.
*
* Pass 0 as id in order to reset the root surface.
*
struct vigsp_cmd_set_root_surface_request
{
vigsp_surface_id id;
-};
-
-/*
- * @}
- */
-
-/*
- * cmd_copy
- *
- * Copies parts of surface 'src' to
- * surface 'dst'.
- *
- * @{
- */
-
-struct vigsp_cmd_copy_request
-{
- struct vigsp_surface src;
- struct vigsp_surface dst;
- vigsp_u32 num_entries;
- struct vigsp_copy entries[0];
-};
-
-/*
- * @}
- */
-
-/*
- * cmd_solid_fill
- *
- * Fills surface 'sfc' with color 'color' at 'entries'.
- *
- * @{
- */
-
-struct vigsp_cmd_solid_fill_request
-{
- struct vigsp_surface sfc;
- vigsp_color color;
- vigsp_u32 num_entries;
- struct vigsp_rect entries[0];
+ vigsp_offset offset;
};
/*
/*
* cmd_update_vram
*
- * Updates 'sfc' data in vram.
+ * Updates surface 'sfc_id' data in VRAM.
*
* @{
*/
struct vigsp_cmd_update_vram_request
{
- struct vigsp_surface sfc;
-};
-
-/*
- * @}
- */
-
-/*
- * cmd_put_image
- *
- * Puts image 'src_va' on surface 'sfc'.
- * Host may detect page fault condition, in that case it'll
- * set 'is_pf' to 1 in response, target then must fault in 'src_va'
- * memory and repeat this command.
- *
- * @{
- */
-
-struct vigsp_cmd_put_image_request
-{
- struct vigsp_surface sfc;
- vigsp_va src_va;
- vigsp_u32 src_stride;
+ vigsp_surface_id sfc_id;
+ vigsp_offset offset;
struct vigsp_rect rect;
};
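+
+/*
+ * 'offset' is presumably the offset of the surface data in VRAM,
+ * as with cmd_set_root_surface; 'rect' limits the updated region.
+ */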
-struct vigsp_cmd_put_image_response
-{
- vigsp_bool is_pf;
-};
-
/*
* @}
*/
/*
- * cmd_get_image
+ * cmd_update_gpu
*
- * Gets image 'dst_va' from surface 'sfc_id'.
- * Host may detect page fault condition, in that case it'll
- * set 'is_pf' to 1 in response, target then must fault in 'dst_va'
- * memory and repeat this command.
+ * Updates surface 'sfc_id' data on the GPU.
*
* @{
*/
-struct vigsp_cmd_get_image_request
+struct vigsp_cmd_update_gpu_request
{
vigsp_surface_id sfc_id;
- vigsp_va dst_va;
- vigsp_u32 dst_stride;
+ vigsp_offset offset;
struct vigsp_rect rect;
};
-struct vigsp_cmd_get_image_response
-{
- vigsp_bool is_pf;
-};
-
/*
* @}
*/
/*
- * cmd_assign_resource
+ * cmd_copy
*
- * Assign resource 'res_id' to refer to surface 'sfc_id'.
+ * Copies parts of surface 'src_id' to
+ * surface 'dst_id'.
*
* @{
*/
-struct vigsp_cmd_assign_resource_request
+struct vigsp_cmd_copy_request
{
- vigsp_resource_id res_id;
- vigsp_resource_type res_type;
- vigsp_surface_id sfc_id;
- vigsp_u32 width;
- vigsp_u32 height;
+ vigsp_surface_id src_id;
+ vigsp_surface_id dst_id;
+ vigsp_u32 num_entries;
+ struct vigsp_copy entries[0];
};
/*
*/
/*
- * cmd_destroy_resource
+ * cmd_solid_fill
*
- * Destroys resource 'id'.
+ * Fills the 'entries' rectangles of surface 'sfc_id' with 'color'.
*
* @{
*/
-struct vigsp_cmd_destroy_resource_request
+struct vigsp_cmd_solid_fill_request
{
- vigsp_resource_id id;
+ vigsp_surface_id sfc_id;
+ vigsp_color color;
+ vigsp_u32 num_entries;
+ struct vigsp_rect entries[0];
};
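+
+/*
+ * For variable-length requests like this one the request header's
+ * 'size' is presumably sizeof(struct vigsp_cmd_solid_fill_request)
+ * plus num_entries * sizeof(struct vigsp_rect).
+ */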
/*
--- /dev/null
+#include "vigs_surface.h"
+#include "vigs_device.h"
+#include "vigs_comm.h"
+#include <drm/vigs_drm.h>
+
+static void vigs_surface_destroy(struct vigs_gem_object *gem)
+{
+ struct vigs_surface *sfc = vigs_gem_to_vigs_surface(gem);
+ struct vigs_device *vigs_dev = gem->base.dev->dev_private;
+
+ vigs_comm_destroy_surface(vigs_dev->comm,
+ vigs_surface_id(sfc));
+
+ vigs_gem_cleanup(&sfc->gem);
+}
+
+int vigs_surface_create(struct vigs_device *vigs_dev,
+ u32 width,
+ u32 height,
+ u32 stride,
+ vigsp_surface_format format,
+ struct vigs_surface **sfc)
+{
+ int ret = 0;
+
+ *sfc = kzalloc(sizeof(**sfc), GFP_KERNEL);
+
+ if (!*sfc) {
+ ret = -ENOMEM;
+ goto fail1;
+ }
+
+ (*sfc)->width = width;
+ (*sfc)->height = height;
+ (*sfc)->stride = stride;
+ (*sfc)->format = format;
+
+ ret = vigs_gem_init(&(*sfc)->gem,
+ vigs_dev,
+ VIGS_GEM_TYPE_SURFACE,
+ stride * height,
+ false,
+ &vigs_surface_destroy);
+
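+ /*
+ * Assumption: on failure 'vigs_gem_init' frees '*sfc' itself
+ * (as 'ttm_bo_init' does), hence no kfree() at 'fail1'.
+ */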
+ if (ret != 0) {
+ goto fail1;
+ }
+
+ ret = vigs_comm_create_surface(vigs_dev->comm,
+ width,
+ height,
+ stride,
+ format,
+ vigs_surface_id(*sfc));
+
+ if (ret != 0) {
+ goto fail2;
+ }
+
+ return 0;
+
+fail2:
+ vigs_gem_cleanup(&(*sfc)->gem);
+fail1:
+ *sfc = NULL;
+
+ return ret;
+}
+
+int vigs_surface_create_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct vigs_device *vigs_dev = drm_dev->dev_private;
+ struct drm_vigs_create_surface *args = data;
+ struct vigs_surface *sfc = NULL;
+ uint32_t handle;
+ int ret;
+
+ ret = vigs_surface_create(vigs_dev,
+ args->width,
+ args->height,
+ args->stride,
+ args->format,
+ &sfc);
+
+ if (ret != 0) {
+ return ret;
+ }
+
+ ret = drm_gem_handle_create(file_priv,
+ &sfc->gem.base,
+ &handle);
+
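+ /*
+ * The handle created above holds its own reference to the GEM,
+ * so the object remains valid after we drop ours below.
+ */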
+ drm_gem_object_unreference_unlocked(&sfc->gem.base);
+
+ if (ret == 0) {
+ args->handle = handle;
+ args->mmap_offset = vigs_gem_mmap_offset(&sfc->gem);
+ args->id = vigs_surface_id(sfc);
+ }
+
+ return ret;
+}
+
+int vigs_surface_info_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vigs_surface_info *args = data;
+ struct drm_gem_object *gem;
+ struct vigs_gem_object *vigs_gem;
+ struct vigs_surface *sfc;
+
+ gem = drm_gem_object_lookup(drm_dev, file_priv, args->handle);
+
+ if (gem == NULL) {
+ return -ENOENT;
+ }
+
+ vigs_gem = gem_to_vigs_gem(gem);
+
+ if (vigs_gem->type != VIGS_GEM_TYPE_SURFACE) {
+ drm_gem_object_unreference_unlocked(gem);
+ return -ENOENT;
+ }
+
+ sfc = vigs_gem_to_vigs_surface(vigs_gem);
+
+ args->width = sfc->width;
+ args->height = sfc->height;
+ args->stride = sfc->stride;
+ args->format = sfc->format;
+ args->mmap_offset = vigs_gem_mmap_offset(vigs_gem);
+ args->id = vigs_surface_id(sfc);
+
+ drm_gem_object_unreference_unlocked(gem);
+
+ return 0;
+}
--- /dev/null
+#ifndef _VIGS_SURFACE_H_
+#define _VIGS_SURFACE_H_
+
+#include "drmP.h"
+#include "vigs_protocol.h"
+#include "vigs_gem.h"
+
+struct vigs_surface
+{
+ /*
+ * Must be first member!
+ */
+ struct vigs_gem_object gem;
+
+ u32 width;
+ u32 height;
+ u32 stride;
+ vigsp_surface_format format;
+};
+
+static inline struct vigs_surface *vigs_gem_to_vigs_surface(struct vigs_gem_object *vigs_gem)
+{
+ return container_of(vigs_gem, struct vigs_surface, gem);
+}
+
+int vigs_surface_create(struct vigs_device *vigs_dev,
+ u32 width,
+ u32 height,
+ u32 stride,
+ vigsp_surface_format format,
+ struct vigs_surface **sfc);
+
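+/*
+ * The surface id is simply the GEM mmap offset: mmap offsets are
+ * unique per DRM device, which gives ids that are unique across
+ * the whole target system, as the protocol requires.
+ */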
+static inline vigsp_surface_id vigs_surface_id(struct vigs_surface *sfc)
+{
+ return vigs_gem_mmap_offset(&sfc->gem);
+}
+
+/*
+ * IOCTLs
+ * @{
+ */
+
+int vigs_surface_create_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
+
+int vigs_surface_info_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
+
+/*
+ * @}
+ */
+
+#endif
/*
* Bump this whenever driver interface changes.
*/
-#define DRM_VIGS_DRIVER_VERSION 3
-
-#define DRM_VIGS_GEM_DOMAIN_VRAM 0
-#define DRM_VIGS_GEM_DOMAIN_RAM 1
+#define DRM_VIGS_DRIVER_VERSION 4
struct drm_vigs_get_protocol_version
{
uint32_t version;
};
-struct drm_vigs_gem_create
+struct drm_vigs_create_surface
{
- uint32_t domain;
- uint32_t size;
+ uint32_t width;
+ uint32_t height;
+ uint32_t stride;
+ uint32_t format;
uint32_t handle;
- uint32_t domain_offset;
+ uint64_t mmap_offset;
+ uint64_t id;
};
-struct drm_vigs_gem_mmap
+struct drm_vigs_create_execbuffer
{
+ uint32_t size;
uint32_t handle;
- uint64_t offset;
};
-struct drm_vigs_gem_info
+struct drm_vigs_surface_info
{
uint32_t handle;
- uint32_t domain;
- uint32_t domain_offset;
-};
-
-struct drm_vigs_user_enter
-{
- uint32_t index;
+ uint32_t width;
+ uint32_t height;
+ uint32_t stride;
+ uint32_t format;
+ uint64_t mmap_offset;
+ uint64_t id;
};
-struct drm_vigs_user_leave
+struct drm_vigs_exec
{
- uint32_t index;
-};
-
-struct drm_vigs_fb_info
-{
- uint32_t fb_id;
- uint32_t sfc_id;
+ uint32_t handle;
};
#define DRM_VIGS_GET_PROTOCOL_VERSION 0x00
-#define DRM_VIGS_GEM_CREATE 0x01
-#define DRM_VIGS_GEM_MMAP 0x02
-#define DRM_VIGS_GEM_INFO 0x03
-#define DRM_VIGS_USER_ENTER 0x04
-#define DRM_VIGS_USER_LEAVE 0x05
-#define DRM_VIGS_FB_INFO 0x06
+#define DRM_VIGS_CREATE_SURFACE 0x01
+#define DRM_VIGS_CREATE_EXECBUFFER 0x02
+#define DRM_VIGS_SURFACE_INFO 0x03
+#define DRM_VIGS_EXEC 0x04
#define DRM_IOCTL_VIGS_GET_PROTOCOL_VERSION DRM_IOR(DRM_COMMAND_BASE + \
DRM_VIGS_GET_PROTOCOL_VERSION, struct drm_vigs_get_protocol_version)
-#define DRM_IOCTL_VIGS_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \
- DRM_VIGS_GEM_CREATE, struct drm_vigs_gem_create)
-#define DRM_IOCTL_VIGS_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + \
- DRM_VIGS_GEM_MMAP, struct drm_vigs_gem_mmap)
-#define DRM_IOCTL_VIGS_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + \
- DRM_VIGS_GEM_INFO, struct drm_vigs_gem_info)
-#define DRM_IOCTL_VIGS_USER_ENTER DRM_IOR(DRM_COMMAND_BASE + \
- DRM_VIGS_USER_ENTER, struct drm_vigs_user_enter)
-#define DRM_IOCTL_VIGS_USER_LEAVE DRM_IOW(DRM_COMMAND_BASE + \
- DRM_VIGS_USER_LEAVE, struct drm_vigs_user_leave)
-#define DRM_IOCTL_VIGS_FB_INFO DRM_IOWR(DRM_COMMAND_BASE + \
- DRM_VIGS_FB_INFO, struct drm_vigs_fb_info)
+#define DRM_IOCTL_VIGS_CREATE_SURFACE DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_VIGS_CREATE_SURFACE, struct drm_vigs_create_surface)
+#define DRM_IOCTL_VIGS_CREATE_EXECBUFFER DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_VIGS_CREATE_EXECBUFFER, struct drm_vigs_create_execbuffer)
+#define DRM_IOCTL_VIGS_SURFACE_INFO DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_VIGS_SURFACE_INFO, struct drm_vigs_surface_info)
+#define DRM_IOCTL_VIGS_EXEC DRM_IOW(DRM_COMMAND_BASE + \
+ DRM_VIGS_EXEC, struct drm_vigs_exec)
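+
+/*
+ * Example (illustration only; error handling omitted): creating a
+ * surface from userspace.
+ *
+ *   struct drm_vigs_create_surface args = {
+ *       .width = 64,
+ *       .height = 64,
+ *       .stride = 64 * 4,
+ *       .format = 1,
+ *   };
+ *
+ *   (format 1 is vigsp_surface_bgra8888)
+ *
+ *   ioctl(fd, DRM_IOCTL_VIGS_CREATE_SURFACE, &args);
+ *
+ * On success 'args.handle', 'args.mmap_offset' and 'args.id' are
+ * filled in by the driver.
+ */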
#endif