return ret;
}
+/*
+ * Issue a vigsp_cmd_update_vram command to the host: refresh the VRAM
+ * copy of surface 'id' located at 'offset'. Command buffer access is
+ * serialized with 'comm->mutex'. Returns 0 on success, or the error
+ * from vigs_comm_prepare()/vigs_comm_exec_internal().
+ */
+int vigs_comm_update_vram(struct vigs_comm *comm,
+ vigsp_surface_id id,
+ vigsp_offset offset)
+{
+ int ret;
+ struct vigsp_cmd_update_vram_request *request;
+
+ DRM_DEBUG_DRIVER("id = %u, offset = %u\n", id, offset);
+
+ mutex_lock(&comm->mutex);
+
+ /* Reserve room for the request in the command buffer. */
+ ret = vigs_comm_prepare(comm,
+ vigsp_cmd_update_vram,
+ sizeof(*request),
+ 0,
+ (void**)&request,
+ NULL);
+
+ if (ret == 0) {
+ request->sfc_id = id;
+ request->offset = offset;
+
+ ret = vigs_comm_exec_internal(comm);
+ }
+
+ mutex_unlock(&comm->mutex);
+
+ return ret;
+}
+
+/*
+ * Issue a vigsp_cmd_update_gpu command to the host: refresh the GPU
+ * copy of surface 'id' from its VRAM data at 'offset'. Mirrors
+ * vigs_comm_update_vram() in the opposite direction. Command buffer
+ * access is serialized with 'comm->mutex'. Returns 0 on success, or
+ * the error from vigs_comm_prepare()/vigs_comm_exec_internal().
+ */
+int vigs_comm_update_gpu(struct vigs_comm *comm,
+ vigsp_surface_id id,
+ vigsp_offset offset)
+{
+ int ret;
+ struct vigsp_cmd_update_gpu_request *request;
+
+ DRM_DEBUG_DRIVER("id = %u, offset = %u\n", id, offset);
+
+ mutex_lock(&comm->mutex);
+
+ /* Reserve room for the request in the command buffer. */
+ ret = vigs_comm_prepare(comm,
+ vigsp_cmd_update_gpu,
+ sizeof(*request),
+ 0,
+ (void**)&request,
+ NULL);
+
+ if (ret == 0) {
+ request->sfc_id = id;
+ request->offset = offset;
+
+ ret = vigs_comm_exec_internal(comm);
+ }
+
+ mutex_unlock(&comm->mutex);
+
+ return ret;
+}
+
int vigs_comm_get_protocol_version_ioctl(struct drm_device *drm_dev,
void *data,
struct drm_file *file_priv)
vigsp_surface_id id,
vigsp_offset offset);
+int vigs_comm_update_vram(struct vigs_comm *comm,
+ vigsp_surface_id id,
+ vigsp_offset offset);
+
+int vigs_comm_update_gpu(struct vigs_comm *comm,
+ vigsp_surface_id id,
+ vigsp_offset offset);
+
/*
* IOCTLs
* @{
#include "vigs_surface.h"
#include <drm/vigs_drm.h>
+/*
+ * vigs_mman_ops::map callback: pin the GEM object backing 'bo' when a
+ * userspace mapping is created. Per the vigs_mman_ops contract, 'bo'
+ * is unreserved when this is called, so reserve around the pin.
+ * Returns the result of vigs_gem_pin().
+ */
+static int vigs_device_mman_map(void *user_data, struct ttm_buffer_object *bo)
+{
+ struct vigs_gem_object *vigs_gem = bo_to_vigs_gem(bo);
+ int ret;
+
+ vigs_gem_reserve(vigs_gem);
+
+ ret = vigs_gem_pin(vigs_gem);
+
+ vigs_gem_unreserve(vigs_gem);
+
+ return ret;
+}
+
+/*
+ * vigs_mman_ops::unmap callback: counterpart of vigs_device_mman_map(),
+ * drops the pin when a userspace mapping is torn down. 'bo' is
+ * unreserved on entry, so reserve around the unpin.
+ */
+static void vigs_device_mman_unmap(void *user_data, struct ttm_buffer_object *bo)
+{
+ struct vigs_gem_object *vigs_gem = bo_to_vigs_gem(bo);
+
+ vigs_gem_reserve(vigs_gem);
+
+ vigs_gem_unpin(vigs_gem);
+
+ vigs_gem_unreserve(vigs_gem);
+}
+
+/*
+ * vigs_mman_ops::vram_to_gpu callback, invoked on a VRAM -> GPU move.
+ * If the surface was marked dirty (written through a VRAM mapping),
+ * push its VRAM contents to the host GPU copy before the VRAM backing
+ * goes away, then clear the flag. Per the vigs_mman_ops contract,
+ * 'bo' is reserved by the caller, which satisfies the reserve rule
+ * for accessing vigs_surface::is_dirty.
+ */
+static void vigs_device_mman_vram_to_gpu(void *user_data,
+ struct ttm_buffer_object *bo)
+{
+ struct vigs_device *vigs_dev = user_data;
+ struct vigs_gem_object *vigs_gem = bo_to_vigs_gem(bo);
+ struct vigs_surface *vigs_sfc = vigs_gem_to_vigs_surface(vigs_gem);
+
+ if (vigs_sfc->is_dirty) {
+ vigs_comm_update_gpu(vigs_dev->comm,
+ vigs_sfc->id,
+ vigs_gem_offset(vigs_gem));
+ vigs_sfc->is_dirty = false;
+ }
+}
+
+/*
+ * vigs_mman_ops::gpu_to_vram callback, invoked on a GPU -> VRAM move.
+ * Asks the host to write the surface contents into VRAM at
+ * 'new_offset' (the buffer's new VRAM placement, computed by the
+ * caller). 'bo' is reserved by the caller per the vigs_mman_ops
+ * contract.
+ */
+static void vigs_device_mman_gpu_to_vram(void *user_data,
+ struct ttm_buffer_object *bo,
+ unsigned long new_offset)
+{
+ struct vigs_device *vigs_dev = user_data;
+ struct vigs_gem_object *vigs_gem = bo_to_vigs_gem(bo);
+ struct vigs_surface *vigs_sfc = vigs_gem_to_vigs_surface(vigs_gem);
+
+ vigs_comm_update_vram(vigs_dev->comm,
+ vigs_sfc->id,
+ new_offset);
+}
+
+/*
+ * Memory-manager callback table, registered with the vigs_mman in
+ * vigs_mman_create() together with the vigs_device as user_data.
+ */
+static struct vigs_mman_ops mman_ops =
+{
+ .map = &vigs_device_mman_map,
+ .unmap = &vigs_device_mman_unmap,
+ .vram_to_gpu = &vigs_device_mman_vram_to_gpu,
+ .gpu_to_vram = &vigs_device_mman_gpu_to_vram
+};
+
static struct vigs_surface
*vigs_device_reference_surface_unlocked(struct vigs_device *vigs_dev,
vigsp_surface_id sfc_id)
if (vigs_gem_in_vram(&sfc->gem)) {
update_vram_request->offset = vigs_gem_offset(&sfc->gem);
} else {
+ DRM_DEBUG_DRIVER("Surface %u not in VRAM, ignoring update_vram\n",
+ update_vram_request->sfc_id);
update_vram_request->sfc_id = 0;
}
list_add_tail(&sfc->gem.list, gem_list);
vigs_gem_reserve(&sfc->gem);
if (vigs_gem_in_vram(&sfc->gem)) {
update_gpu_request->offset = vigs_gem_offset(&sfc->gem);
+ sfc->is_dirty = false;
} else {
+ DRM_DEBUG_DRIVER("Surface %u not in VRAM, ignoring update_gpu\n",
+ update_gpu_request->sfc_id);
update_gpu_request->sfc_id = 0;
}
list_add_tail(&sfc->gem.list, gem_list);
ret = vigs_mman_create(vigs_dev->vram_base, vigs_dev->vram_size,
vigs_dev->ram_base, vigs_dev->ram_size,
+ &mman_ops,
+ vigs_dev,
&vigs_dev->mman);
if (ret != 0) {
DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(VIGS_EXEC, vigs_device_exec_ioctl,
DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIGS_SURFACE_SET_DIRTY, vigs_surface_set_dirty_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
};
static const struct file_operations vigs_drm_driver_fops =
#include "vigs_mman.h"
-#include "vigs_gem.h"
#include <ttm/ttm_placement.h>
/*
bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
+ struct vigs_mman *mman = bo_dev_to_vigs_mman(bo->bdev);
struct ttm_mem_reg *old_mem = &bo->mem;
if ((old_mem->mem_type == TTM_PL_VRAM) &&
(new_mem->mem_type == TTM_PL_TT)) {
DRM_DEBUG_DRIVER("ttm_move: 0x%llX vram -> gpu\n", bo->addr_space_offset);
+ mman->ops->vram_to_gpu(mman->user_data, bo);
+
ttm_bo_mem_put(bo, old_mem);
*old_mem = *new_mem;
(new_mem->mem_type == TTM_PL_VRAM)) {
DRM_DEBUG_DRIVER("ttm_move: 0x%llX gpu -> vram\n", bo->addr_space_offset);
+ mman->ops->gpu_to_vram(mman->user_data, bo,
+ (new_mem->start << PAGE_SHIFT) +
+ bo->bdev->man[new_mem->mem_type].gpu_offset);
+
ttm_bo_mem_put(bo, old_mem);
*old_mem = *new_mem;
resource_size_t vram_size,
resource_size_t ram_base,
resource_size_t ram_size,
+ struct vigs_mman_ops *ops,
+ void *user_data,
struct vigs_mman **mman)
{
int ret = 0;
(*mman)->vram_base = vram_base;
(*mman)->ram_base = ram_base;
+ (*mman)->ops = ops;
+ (*mman)->user_data = user_data;
ret = ttm_bo_device_init(&(*mman)->bo_dev,
(*mman)->bo_global_ref.ref.object,
static void vigs_ttm_open(struct vm_area_struct *vma)
{
struct ttm_buffer_object *bo = vma->vm_private_data;
- struct vigs_gem_object *vigs_gem = bo_to_vigs_gem(bo);
-
- vigs_gem_reserve(vigs_gem);
+ struct vigs_mman *mman = bo_dev_to_vigs_mman(bo->bdev);
- vigs_gem_pin(vigs_gem);
-
- vigs_gem_unreserve(vigs_gem);
+ mman->ops->map(mman->user_data, bo);
ttm_vm_ops->open(vma);
}
static void vigs_ttm_close(struct vm_area_struct *vma)
{
struct ttm_buffer_object *bo = vma->vm_private_data;
- struct vigs_gem_object *vigs_gem = bo_to_vigs_gem(bo);
-
- vigs_gem_reserve(vigs_gem);
+ struct vigs_mman *mman = bo_dev_to_vigs_mman(bo->bdev);
- vigs_gem_unpin(vigs_gem);
-
- vigs_gem_unreserve(vigs_gem);
+ mman->ops->unmap(mman->user_data, bo);
ttm_vm_ops->close(vma);
}
struct vm_area_struct *vma)
{
struct ttm_buffer_object *bo;
- struct vigs_gem_object *vigs_gem;
int ret;
if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
bo = vma->vm_private_data;
- vigs_gem = bo_to_vigs_gem(bo);
-
- vigs_gem_reserve(vigs_gem);
-
- ret = vigs_gem_pin(vigs_gem);
-
- vigs_gem_unreserve(vigs_gem);
+ ret = mman->ops->map(mman->user_data, bo);
if (ret != 0) {
ttm_vm_ops->close(vma);
struct vigs_mman_ops
{
+ /*
+ * 'bo' is unreserved while calling these.
+ */
+ int (*map)(void *user_data, struct ttm_buffer_object *bo);
+ void (*unmap)(void *user_data, struct ttm_buffer_object *bo);
+ /*
+ * @}
+ */
+
+ /*
+ * 'bo' is reserved while calling these.
+ * @{
+ */
void (*vram_to_gpu)(void *user_data, struct ttm_buffer_object *bo);
-
- void (*gpu_to_vram)(void *user_data, struct ttm_buffer_object *bo);
+ void (*gpu_to_vram)(void *user_data, struct ttm_buffer_object *bo,
+ unsigned long new_offset);
+ /*
+ * @}
+ */
};
struct vigs_mman
resource_size_t vram_size,
resource_size_t ram_base,
resource_size_t ram_size,
+ struct vigs_mman_ops *ops,
+ void *user_data,
struct vigs_mman **mman);
void vigs_mman_destroy(struct vigs_mman *mman);
/*
* Bump this whenever protocol changes.
*/
-#define VIGS_PROTOCOL_VERSION 11
+#define VIGS_PROTOCOL_VERSION 12
typedef signed char vigsp_s8;
typedef signed short vigsp_s16;
{
vigsp_surface_id sfc_id;
vigsp_offset offset;
- struct vigsp_rect rect;
};
/*
{
vigsp_surface_id sfc_id;
vigsp_offset offset;
- struct vigsp_rect rect;
};
/*
return 0;
}
+
+/*
+ * DRM_IOCTL_VIGS_SURFACE_SET_DIRTY handler: mark the surface behind
+ * 'args->handle' as dirty, so a later VRAM -> GPU transition pushes
+ * its VRAM contents to the host (see vigs_device_mman_vram_to_gpu).
+ * The flag is only set while the GEM currently resides in VRAM;
+ * otherwise the call is a silent no-op. Returns -ENOENT for an
+ * unknown handle or a non-surface GEM, 0 otherwise.
+ */
+int vigs_surface_set_dirty_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vigs_surface_set_dirty *args = data;
+ struct drm_gem_object *gem;
+ struct vigs_gem_object *vigs_gem;
+ struct vigs_surface *sfc;
+
+ gem = drm_gem_object_lookup(drm_dev, file_priv, args->handle);
+
+ if (gem == NULL) {
+ return -ENOENT;
+ }
+
+ vigs_gem = gem_to_vigs_gem(gem);
+
+ if (vigs_gem->type != VIGS_GEM_TYPE_SURFACE) {
+ drm_gem_object_unreference_unlocked(gem);
+ return -ENOENT;
+ }
+
+ sfc = vigs_gem_to_vigs_surface(vigs_gem);
+
+ /* is_dirty must only be touched between reserve/unreserve. */
+ vigs_gem_reserve(&sfc->gem);
+
+ if (vigs_gem_in_vram(&sfc->gem)) {
+ sfc->is_dirty = true;
+ }
+
+ vigs_gem_unreserve(&sfc->gem);
+
+ drm_gem_object_unreference_unlocked(gem);
+
+ return 0;
+}
u32 stride;
vigsp_surface_format format;
vigsp_surface_id id;
+
+ /*
+ * Members below MUST be accessed between
+ * vigs_gem_reserve/vigs_gem_unreserve.
+ * @{
+ */
+
+ bool is_dirty;
+
+ /*
+ * @}
+ */
};
static inline struct vigs_surface *vigs_gem_to_vigs_surface(struct vigs_gem_object *vigs_gem)
void *data,
struct drm_file *file_priv);
+int vigs_surface_set_dirty_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
+
/*
* @}
*/
/*
* Bump this whenever driver interface changes.
*/
-#define DRM_VIGS_DRIVER_VERSION 4
+#define DRM_VIGS_DRIVER_VERSION 5
struct drm_vigs_get_protocol_version
{
uint32_t handle;
};
+/*
+ * Argument block for DRM_IOCTL_VIGS_SURFACE_SET_DIRTY: 'handle' is
+ * the GEM handle of the surface to mark dirty.
+ */
+struct drm_vigs_surface_set_dirty
+{
+ uint32_t handle;
+};
+
#define DRM_VIGS_GET_PROTOCOL_VERSION 0x00
#define DRM_VIGS_CREATE_SURFACE 0x01
#define DRM_VIGS_CREATE_EXECBUFFER 0x02
#define DRM_VIGS_SURFACE_INFO 0x03
#define DRM_VIGS_EXEC 0x04
+#define DRM_VIGS_SURFACE_SET_DIRTY 0x05
#define DRM_IOCTL_VIGS_GET_PROTOCOL_VERSION DRM_IOR(DRM_COMMAND_BASE + \
DRM_VIGS_GET_PROTOCOL_VERSION, struct drm_vigs_get_protocol_version)
DRM_VIGS_SURFACE_INFO, struct drm_vigs_surface_info)
#define DRM_IOCTL_VIGS_EXEC DRM_IOW(DRM_COMMAND_BASE + \
DRM_VIGS_EXEC, struct drm_vigs_exec)
+#define DRM_IOCTL_VIGS_SURFACE_SET_DIRTY DRM_IOW(DRM_COMMAND_BASE + \
+ DRM_VIGS_SURFACE_SET_DIRTY, struct drm_vigs_surface_set_dirty)
#endif