struct vigs_device *vigs_dev = user_data;
struct vigs_gem_object *vigs_gem = bo_to_vigs_gem(bo);
struct vigs_surface *vigs_sfc = vigs_gem_to_vigs_surface(vigs_gem);
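+ /*
+ * vigs_surface_need_gpu_update() has side effects (it retires pending
+ * writers), hence it's evaluated unconditionally, before the dirty check.
+ */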
+ bool need_gpu_update = vigs_surface_need_gpu_update(vigs_sfc);
- if (!vigs_sfc->is_gpu_dirty) {
+ if (!vigs_sfc->is_gpu_dirty && need_gpu_update) {
+ DRM_INFO("vram_to_gpu: 0x%llX\n", bo->addr_space_offset);
vigs_comm_update_gpu(vigs_dev->comm,
vigs_sfc->id,
vigs_sfc->width,
vigs_sfc->height,
vigs_gem_offset(vigs_gem));
+ } else {
+ DRM_INFO("vram_to_gpu: 0x%llX (no-op)\n", bo->addr_space_offset);
}
vigs_sfc->is_gpu_dirty = false;
struct vigs_gem_object *vigs_gem = bo_to_vigs_gem(bo);
struct vigs_surface *vigs_sfc = vigs_gem_to_vigs_surface(vigs_gem);
- vigs_comm_update_vram(vigs_dev->comm,
- vigs_sfc->id,
- new_offset);
+ if (vigs_surface_need_vram_update(vigs_sfc)) {
+ DRM_DEBUG_DRIVER("0x%llX\n", bo->addr_space_offset);
+ vigs_comm_update_vram(vigs_dev->comm,
+ vigs_sfc->id,
+ new_offset);
+ } else {
+ DRM_DEBUG_DRIVER("0x%llX (no-op)\n", bo->addr_space_offset);
+ }
+}
+
+static void vigs_device_mman_init_vma(void *user_data,
+ void *vma_data_opaque,
+ struct ttm_buffer_object *bo)
+{
+ struct vigs_vma_data *vma_data = vma_data_opaque;
+ struct vigs_gem_object *vigs_gem = bo_to_vigs_gem(bo);
+
+ if (vigs_gem->type != VIGS_GEM_TYPE_SURFACE) {
+ vma_data->sfc = NULL;
+ return;
+ }
+
+ vigs_vma_data_init(vma_data, vigs_gem_to_vigs_surface(vigs_gem));
+}
+
+static void vigs_device_mman_cleanup_vma(void *user_data,
+ void *vma_data_opaque)
+{
+ struct vigs_vma_data *vma_data = vma_data_opaque;
+
+ if (!vma_data->sfc) {
+ return;
+ }
+
+ vigs_vma_data_cleanup(vma_data);
}
static struct vigs_mman_ops mman_ops =
{
.vram_to_gpu = &vigs_device_mman_vram_to_gpu,
- .gpu_to_vram = &vigs_device_mman_gpu_to_vram
+ .gpu_to_vram = &vigs_device_mman_gpu_to_vram,
+ .init_vma = &vigs_device_mman_init_vma,
+ .cleanup_vma = &vigs_device_mman_cleanup_vma
};
static struct vigs_surface
return sfc;
}
-static bool vigs_gem_is_reserved(struct list_head* gem_list,
+static bool vigs_gem_is_reserved(struct list_head *gem_list,
struct vigs_gem_object *gem)
{
struct vigs_gem_object *tmp;
return false;
}
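+/*
+ * Look up the surface by id, reserve its GEM and add it to 'gem_list'.
+ * Each GEM is reserved only once per execbuffer; if it's already on the
+ * list only the extra reference is dropped.
+ */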
+static struct vigs_surface
+ *vigs_surface_reserve(struct vigs_device *vigs_dev,
+ struct list_head *gem_list,
+ vigsp_surface_id sfc_id)
+{
+ struct vigs_surface *sfc =
+ vigs_device_reference_surface_unlocked(vigs_dev, sfc_id);
+
+ if (!sfc) {
+ DRM_ERROR("Surface %u not found\n", sfc_id);
+ return NULL;
+ }
+
+ if (vigs_gem_is_reserved(gem_list, &sfc->gem)) {
+ drm_gem_object_unreference_unlocked(&sfc->gem.base);
+ } else {
+ vigs_gem_reserve(&sfc->gem);
+ list_add_tail(&sfc->gem.list, gem_list);
+ }
+
+ return sfc;
+}
+
/*
* 'gem_list' will hold a list of GEMs that should be
* unreserved and unreferenced after execution.
struct vigsp_cmd_update_gpu_request *update_gpu;
struct vigsp_cmd_copy_request *copy;
struct vigsp_cmd_solid_fill_request *solid_fill;
+ void *data;
} request;
vigsp_u32 i;
struct vigs_surface *sfc;
break;
}
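+ /*
+ * All request types share the union above; pointing 'data' at the
+ * payload lets each case below use its typed member without casts.
+ */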
+ request.data = (request_header + 1);
+
switch (request_header->cmd) {
case vigsp_cmd_update_vram:
- request.update_vram =
- (struct vigsp_cmd_update_vram_request*)(request_header + 1);
- sfc = vigs_device_reference_surface_unlocked(vigs_dev, request.update_vram->sfc_id);
+ sfc = vigs_surface_reserve(vigs_dev,
+ gem_list,
+ request.update_vram->sfc_id);
if (!sfc) {
- DRM_ERROR("Surface %u not found\n", request.update_vram->sfc_id);
ret = -EINVAL;
break;
}
- if (vigs_gem_is_reserved(gem_list, &sfc->gem)) {
- drm_gem_object_unreference_unlocked(&sfc->gem.base);
- } else {
- vigs_gem_reserve(&sfc->gem);
- list_add_tail(&sfc->gem.list, gem_list);
- }
if (vigs_gem_in_vram(&sfc->gem)) {
- request.update_vram->offset = vigs_gem_offset(&sfc->gem);
- sfc->is_gpu_dirty = false;
+ if (vigs_surface_need_vram_update(sfc)) {
+ request.update_vram->offset = vigs_gem_offset(&sfc->gem);
+ sfc->is_gpu_dirty = false;
+ } else {
+ DRM_DEBUG_DRIVER("Surface %u doesn't need to be updated, ignoring update_vram\n",
+ request.update_vram->sfc_id);
+ request.update_vram->sfc_id = 0;
+ }
} else {
DRM_DEBUG_DRIVER("Surface %u not in VRAM, ignoring update_vram\n",
request.update_vram->sfc_id);
}
break;
case vigsp_cmd_update_gpu:
- request.update_gpu =
- (struct vigsp_cmd_update_gpu_request*)(request_header + 1);
- sfc = vigs_device_reference_surface_unlocked(vigs_dev, request.update_gpu->sfc_id);
+ sfc = vigs_surface_reserve(vigs_dev,
+ gem_list,
+ request.update_gpu->sfc_id);
if (!sfc) {
- DRM_ERROR("Surface %u not found\n", request.update_gpu->sfc_id);
ret = -EINVAL;
break;
}
- if (vigs_gem_is_reserved(gem_list, &sfc->gem)) {
- drm_gem_object_unreference_unlocked(&sfc->gem.base);
- } else {
- vigs_gem_reserve(&sfc->gem);
- list_add_tail(&sfc->gem.list, gem_list);
- }
if (vigs_gem_in_vram(&sfc->gem)) {
- request.update_gpu->offset = vigs_gem_offset(&sfc->gem);
- sfc->is_gpu_dirty = false;
+ if (vigs_surface_need_gpu_update(sfc)) {
+ request.update_gpu->offset = vigs_gem_offset(&sfc->gem);
+ sfc->is_gpu_dirty = false;
+ } else {
+ DRM_DEBUG_DRIVER("Surface %u doesn't need to be updated, ignoring update_gpu\n",
+ request.update_gpu->sfc_id);
+ request.update_gpu->sfc_id = 0;
+ }
} else {
DRM_DEBUG_DRIVER("Surface %u not in VRAM, ignoring update_gpu\n",
request.update_gpu->sfc_id);
}
break;
case vigsp_cmd_copy:
- request.copy =
- (struct vigsp_cmd_copy_request*)(request_header + 1);
- sfc = vigs_device_reference_surface_unlocked(vigs_dev, request.copy->dst_id);
+ sfc = vigs_surface_reserve(vigs_dev,
+ gem_list,
+ request.copy->dst_id);
if (!sfc) {
- DRM_ERROR("Surface %u not found\n", request.copy->dst_id);
ret = -EINVAL;
break;
}
- if (vigs_gem_is_reserved(gem_list, &sfc->gem)) {
- drm_gem_object_unreference_unlocked(&sfc->gem.base);
- } else {
- vigs_gem_reserve(&sfc->gem);
- list_add_tail(&sfc->gem.list, gem_list);
- }
if (vigs_gem_in_vram(&sfc->gem)) {
sfc->is_gpu_dirty = true;
}
break;
case vigsp_cmd_solid_fill:
- request.solid_fill =
- (struct vigsp_cmd_solid_fill_request*)(request_header + 1);
- sfc = vigs_device_reference_surface_unlocked(vigs_dev, request.solid_fill->sfc_id);
+ sfc = vigs_surface_reserve(vigs_dev,
+ gem_list,
+ request.solid_fill->sfc_id);
if (!sfc) {
- DRM_ERROR("Surface %u not found\n", request.solid_fill->sfc_id);
ret = -EINVAL;
break;
}
- if (vigs_gem_is_reserved(gem_list, &sfc->gem)) {
- drm_gem_object_unreference_unlocked(&sfc->gem.base);
- } else {
- vigs_gem_reserve(&sfc->gem);
- list_add_tail(&sfc->gem.list, gem_list);
- }
if (vigs_gem_in_vram(&sfc->gem)) {
sfc->is_gpu_dirty = true;
}
}
request_header =
- (struct vigsp_cmd_request_header*)((u8*)(request_header + 1) +
- request_header->size);
+ (struct vigsp_cmd_request_header*)(request.data +
+ request_header->size);
}
return 0;
ret = vigs_mman_create(vigs_dev->vram_base, vigs_dev->vram_size,
vigs_dev->ram_base, vigs_dev->ram_size,
+ sizeof(struct vigs_vma_data),
&mman_ops,
vigs_dev,
&vigs_dev->mman);
DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(VIGS_SURFACE_SET_GPU_DIRTY, vigs_surface_set_gpu_dirty_ioctl,
DRM_UNLOCKED | DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIGS_SURFACE_UPDATE_VRAM, vigs_surface_update_vram_ioctl,
- DRM_UNLOCKED | DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIGS_SURFACE_UPDATE_GPU, vigs_surface_update_gpu_ioctl,
+ DRM_IOCTL_DEF_DRV(VIGS_SURFACE_START_ACCESS, vigs_surface_start_access_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIGS_SURFACE_END_ACCESS, vigs_surface_end_access_ioctl,
DRM_UNLOCKED | DRM_AUTH)
};
if ((old_mem->mem_type == TTM_PL_VRAM) &&
(new_mem->mem_type == TTM_PL_TT)) {
- DRM_INFO("ttm_move: 0x%llX vram -> gpu\n", bo->addr_space_offset);
-
mman->ops->vram_to_gpu(mman->user_data, bo);
ttm_bo_mem_put(bo, old_mem);
return 0;
} else if ((old_mem->mem_type == TTM_PL_TT) &&
(new_mem->mem_type == TTM_PL_VRAM)) {
- DRM_DEBUG_DRIVER("ttm_move: 0x%llX gpu -> vram\n", bo->addr_space_offset);
-
mman->ops->gpu_to_vram(mman->user_data, bo,
(new_mem->start << PAGE_SHIFT) +
bo->bdev->man[new_mem->mem_type].gpu_offset);
.io_mem_free = &vigs_ttm_io_mem_free,
};
+/*
+ * VMA related.
+ * @{
+ */
+
+static u32 vigs_vma_cache_index;
+static struct vm_operations_struct vigs_ttm_vm_ops;
+static const struct vm_operations_struct *ttm_vm_ops = NULL;
+
+/*
+ * Represents per-VMA data.
+ *
+ * Since TTM already uses struct vm_area_struct::vm_private_data,
+ * we're forced to find some other place to attach our own data
+ * to a VMA. Currently we use struct vm_area_struct::vm_ops for this.
+ * Ideally, TTM should be refactored not to use
+ * struct vm_area_struct directly, but to provide helper functions
+ * instead, so that users can store whatever they want in
+ * struct vm_area_struct::vm_private_data.
+ */
+struct vigs_mman_vma
+{
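+ /* Must stay first: 'vma->vm_ops' points here and is cast back to vigs_mman_vma. */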
+ struct vm_operations_struct vm_ops;
+ struct vm_area_struct *vma;
+ struct kref kref;
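+ /* Opaque per-VMA payload; the cache object is sized to hold 'vma_data_size' bytes here. */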
+ u8 data[1];
+};
+
+static void vigs_mman_vma_release(struct kref *kref)
+{
+ struct vigs_mman_vma *vigs_vma =
+ container_of(kref, struct vigs_mman_vma, kref);
+ struct ttm_buffer_object *bo = vigs_vma->vma->vm_private_data;
+ struct vigs_mman *mman = bo_dev_to_vigs_mman(bo->bdev);
+
+ mman->ops->cleanup_vma(mman->user_data, &vigs_vma->data[0]);
+
+ vigs_vma->vma->vm_ops = &vigs_ttm_vm_ops;
+
+ kmem_cache_free(mman->vma_cache, vigs_vma);
+}
+
+/*
+ * @}
+ */
+
int vigs_mman_create(resource_size_t vram_base,
resource_size_t vram_size,
resource_size_t ram_base,
resource_size_t ram_size,
+ uint32_t vma_data_size,
struct vigs_mman_ops *ops,
void *user_data,
struct vigs_mman **mman)
{
int ret = 0;
+ char vma_cache_name[100];
unsigned long num_pages = 0;
DRM_DEBUG_DRIVER("enter\n");
+ BUG_ON(vma_data_size == 0);
+
*mman = kzalloc(sizeof(**mman), GFP_KERNEL);
if (!*mman) {
goto fail1;
}
+ snprintf(vma_cache_name, sizeof(vma_cache_name),
+ "vigs_vma_cache%u", vigs_vma_cache_index++);
+
+ (*mman)->vma_cache = kmem_cache_create(vma_cache_name,
+ sizeof(struct vigs_mman_vma) +
+ vma_data_size - 1,
+ 0, 0, NULL);
+
+ if (!(*mman)->vma_cache) {
+ ret = -ENOMEM;
+ goto fail2;
+ }
+
ret = vigs_mman_global_init(*mman);
if (ret != 0) {
- goto fail2;
+ goto fail3;
}
(*mman)->vram_base = vram_base;
0);
if (ret != 0) {
DRM_ERROR("failed initializing bo driver: %d\n", ret);
- goto fail3;
+ goto fail4;
}
/*
(0xFFFFFFFFUL / PAGE_SIZE));
if (ret != 0) {
DRM_ERROR("failed initializing GPU mm\n");
- goto fail4;
+ goto fail5;
}
/*
num_pages);
if (ret != 0) {
DRM_ERROR("failed initializing VRAM mm\n");
- goto fail5;
+ goto fail6;
}
/*
num_pages);
if (ret != 0) {
DRM_ERROR("failed initializing RAM mm\n");
- goto fail6;
+ goto fail7;
}
/*
return 0;
-fail6:
+fail7:
ttm_bo_clean_mm(&(*mman)->bo_dev, TTM_PL_VRAM);
-fail5:
+fail6:
ttm_bo_clean_mm(&(*mman)->bo_dev, TTM_PL_TT);
-fail4:
+fail5:
ttm_bo_device_release(&(*mman)->bo_dev);
-fail3:
+fail4:
vigs_mman_global_cleanup(*mman);
+fail3:
+ kmem_cache_destroy((*mman)->vma_cache);
fail2:
kfree(*mman);
fail1:
ttm_bo_clean_mm(&mman->bo_dev, TTM_PL_TT);
ttm_bo_device_release(&mman->bo_dev);
vigs_mman_global_cleanup(mman);
+ kmem_cache_destroy(mman->vma_cache);
kfree(mman);
}
-static struct vm_operations_struct vigs_ttm_vm_ops;
-static const struct vm_operations_struct *ttm_vm_ops = NULL;
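+/*
+ * open/close are invoked on VMA split/copy and unmap; they only maintain
+ * the refcount on the shared vigs_mman_vma and delegate the rest to TTM.
+ */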
+static void vigs_ttm_open(struct vm_area_struct *vma)
+{
+ struct vigs_mman_vma *vigs_vma = (struct vigs_mman_vma*)vma->vm_ops;
+
+ kref_get(&vigs_vma->kref);
+
+ ttm_vm_ops->open(vma);
+}
+
+static void vigs_ttm_close(struct vm_area_struct *vma)
+{
+ struct vigs_mman_vma *vigs_vma = (struct vigs_mman_vma*)vma->vm_ops;
+
+ kref_put(&vigs_vma->kref, &vigs_mman_vma_release);
+
+ ttm_vm_ops->close(vma);
+}
static int vigs_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct file *filp,
struct vm_area_struct *vma)
{
+ struct vigs_mman_vma *vigs_vma;
int ret;
+ struct ttm_buffer_object *bo;
if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
return drm_mmap(filp, vma);
}
+ vigs_vma = kmem_cache_alloc(mman->vma_cache, GFP_KERNEL);
+
+ if (!vigs_vma) {
+ return -ENOMEM;
+ }
+
ret = ttm_bo_mmap(filp, vma, &mman->bo_dev);
if (unlikely(ret != 0)) {
+ kmem_cache_free(mman->vma_cache, vigs_vma);
return ret;
}
vigs_ttm_vm_ops.fault = &vigs_ttm_fault;
}
- vma->vm_ops = &vigs_ttm_vm_ops;
+ bo = vma->vm_private_data;
+
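+ /*
+ * Give this VMA a private copy of the TTM vm_ops with our open/close
+ * hooks; 'vma->vm_ops' then doubles as a pointer to 'vigs_vma'.
+ */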
+ vigs_vma->vm_ops = vigs_ttm_vm_ops;
+ vigs_vma->vma = vma;
+ vigs_vma->vm_ops.open = &vigs_ttm_open;
+ vigs_vma->vm_ops.close = &vigs_ttm_close;
+ kref_init(&vigs_vma->kref);
+ mman->ops->init_vma(mman->user_data, &vigs_vma->data[0], bo);
+
+ vma->vm_ops = &vigs_vma->vm_ops;
return 0;
}
+
+int vigs_mman_access_vma(struct vigs_mman *mman,
+ unsigned long address,
+ vigs_mman_access_vma_func func,
+ void *user_data)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ int ret;
+ struct ttm_buffer_object *bo;
+ struct vigs_mman_vma *vigs_vma;
+
+ down_read(&mm->mmap_sem);
+
+ vma = find_vma(mm, address);
+
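+ /*
+ * Only VMAs set up by vigs_mman_mmap() use our fault handler, so this
+ * check tells us whether a vigs_mman_vma sits behind 'vm_ops'.
+ */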
+ if (!vma ||
+ !vma->vm_ops ||
+ (vma->vm_ops->fault != &vigs_ttm_fault)) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ bo = vma->vm_private_data;
+
+ BUG_ON(!bo);
+
+ if (bo->bdev != &mman->bo_dev) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ vigs_vma = (struct vigs_mman_vma*)vma->vm_ops;
+
+ ret = func(user_data, &vigs_vma->data[0]);
+
+out:
+ up_read(&mm->mmap_sem);
+
+ return ret;
+}
#define _VIGS_MMAN_H_
#include "drmP.h"
+#include <linux/slab.h>
#include <ttm/ttm_bo_driver.h>
struct vigs_mman_ops
/*
* @}
*/
+
+ /*
+ * Per-VMA data init/cleanup. A VMA may be opened/closed many times
+ * as a result of split/copy, but the init/cleanup handlers are called
+ * only once, i.e. vigs_mman handles the reference counting.
+ * @{
+ */
+
+ void (*init_vma)(void *user_data,
+ void *vma_data,
+ struct ttm_buffer_object *bo);
+
+ /*
+ * current's 'mmap_sem' is locked while calling this.
+ */
+ void (*cleanup_vma)(void *user_data, void *vma_data);
+
+ /*
+ * @}
+ */
};
+typedef int (*vigs_mman_access_vma_func)(void *user_data, void *vma_data);
+
struct vigs_mman
{
+ struct kmem_cache *vma_cache;
+
struct drm_global_reference mem_global_ref;
struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bo_dev;
resource_size_t vram_size,
resource_size_t ram_base,
resource_size_t ram_size,
+ uint32_t vma_data_size,
struct vigs_mman_ops *ops,
void *user_data,
struct vigs_mman **mman);
struct file *filp,
struct vm_area_struct *vma);
+/*
+ * current's 'mmap_sem' is locked while calling 'func'.
+ */
+int vigs_mman_access_vma(struct vigs_mman *mman,
+ unsigned long address,
+ vigs_mman_access_vma_func func,
+ void *user_data);
+
#endif
#include "vigs_surface.h"
#include "vigs_device.h"
#include "vigs_comm.h"
+#include "vigs_mman.h"
#include <drm/vigs_drm.h>
+/*
+ * The functions below MUST be called between
+ * vigs_gem_reserve/vigs_gem_unreserve.
+ * @{
+ */
+
+static u32 vigs_surface_saf(struct vigs_surface *sfc)
+{
+ u32 saf = 0;
+
+ if (sfc->num_readers > 0) {
+ saf |= DRM_VIGS_SAF_READ;
+ }
+
+ if (sfc->num_writers > 0) {
+ saf |= DRM_VIGS_SAF_WRITE;
+ }
+
+ return saf;
+}
+
+static void vigs_surface_saf_changed(struct vigs_surface *sfc,
+ u32 old_saf)
+{
+ u32 new_saf = vigs_surface_saf(sfc);
+
+ if (old_saf == new_saf) {
+ return;
+ }
+
+ /*
+ * If the surface is in GPU memory and access is write-only then we
+ * can obviously skip the first VRAM update, since there's nothing
+ * to read back yet. After the first VRAM update, however, we must
+ * read back every time, since the clients must see their
+ * changes.
+ */
+
+ sfc->skip_vram_update = !vigs_gem_in_vram(&sfc->gem) &&
+ (new_saf == DRM_VIGS_SAF_WRITE) &&
+ !(old_saf & DRM_VIGS_SAF_WRITE);
+}
+
+static void vigs_vma_data_end_access(struct vigs_vma_data *vma_data, bool sync)
+{
+ struct vigs_surface *sfc = vma_data->sfc;
+ struct vigs_device *vigs_dev = sfc->gem.base.dev->dev_private;
+ u32 old_saf = vigs_surface_saf(sfc);
+
+ if (vma_data->saf & DRM_VIGS_SAF_READ) {
+ --sfc->num_readers;
+ }
+
+ if ((vma_data->saf & DRM_VIGS_SAF_WRITE) == 0) {
+ goto out;
+ }
+
+ if (sync) {
+ /*
+ * We have a sync, drop all pending
+ * writers.
+ */
+ sfc->num_writers -= sfc->num_pending_writers;
+ sfc->num_pending_writers = 0;
+ }
+
+ if (!vigs_gem_in_vram(&sfc->gem)) {
+ --sfc->num_writers;
+ goto out;
+ }
+
+ if (sync) {
+ --sfc->num_writers;
+ vigs_comm_update_gpu(vigs_dev->comm,
+ sfc->id,
+ sfc->width,
+ sfc->height,
+ vigs_gem_offset(&sfc->gem));
+ sfc->is_gpu_dirty = false;
+ } else {
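+ /*
+ * Asynchronous end of access: keep the writer accounted for until
+ * the next GPU update retires it (see 'num_pending_writers').
+ */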
+ ++sfc->num_pending_writers;
+ }
+
+out:
+ vma_data->saf = 0;
+
+ vigs_surface_saf_changed(sfc, old_saf);
+}
+
+/*
+ * @}
+ */
+
+void vigs_vma_data_init(struct vigs_vma_data *vma_data,
+ struct vigs_surface *sfc)
+{
+ vma_data->sfc = sfc;
+ vma_data->saf = 0;
+}
+
+void vigs_vma_data_cleanup(struct vigs_vma_data *vma_data)
+{
+ vigs_gem_reserve(&vma_data->sfc->gem);
+
+ vigs_vma_data_end_access(vma_data, true);
+
+ vigs_gem_unreserve(&vma_data->sfc->gem);
+}
+
static void vigs_surface_destroy(struct vigs_gem_object *gem)
{
struct vigs_surface *sfc = vigs_gem_to_vigs_surface(gem);
return ret;
}
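+/*
+ * VRAM needs updating only while somebody has started access to the
+ * surface; 'skip_vram_update' suppresses exactly one such update and
+ * is consumed here.
+ */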
+bool vigs_surface_need_vram_update(struct vigs_surface *sfc)
+{
+ u32 saf = vigs_surface_saf(sfc);
+ bool skip_vram_update = sfc->skip_vram_update;
+
+ sfc->skip_vram_update = false;
+
+ return (saf != 0) && !skip_vram_update;
+}
+
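+/*
+ * Reports whether a writer was active; as a side effect all pending
+ * writers are retired, since their changes are about to reach the GPU.
+ */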
+bool vigs_surface_need_gpu_update(struct vigs_surface *sfc)
+{
+ u32 old_saf = vigs_surface_saf(sfc);
+
+ sfc->num_writers -= sfc->num_pending_writers;
+ sfc->num_pending_writers = 0;
+
+ vigs_surface_saf_changed(sfc, old_saf);
+
+ return old_saf & DRM_VIGS_SAF_WRITE;
+}
+
int vigs_surface_create_ioctl(struct drm_device *drm_dev,
void *data,
struct drm_file *file_priv)
return 0;
}
-int vigs_surface_update_vram_ioctl(struct drm_device *drm_dev,
- void *data,
- struct drm_file *file_priv)
+static int vigs_surface_start_access(void *user_data, void *vma_data_opaque)
{
- struct vigs_device *vigs_dev = drm_dev->dev_private;
- struct drm_vigs_surface_update_vram *args = data;
- struct drm_gem_object *gem;
- struct vigs_gem_object *vigs_gem;
- struct vigs_surface *sfc;
-
- gem = drm_gem_object_lookup(drm_dev, file_priv, args->handle);
+ struct drm_vigs_surface_start_access *args = user_data;
+ struct vigs_vma_data *vma_data = vma_data_opaque;
+ struct vigs_surface *sfc = vma_data->sfc;
+ struct vigs_device *vigs_dev;
+ u32 old_saf;
- if (gem == NULL) {
+ if (!sfc) {
return -ENOENT;
}
- vigs_gem = gem_to_vigs_gem(gem);
+ vigs_dev = sfc->gem.base.dev->dev_private;
- if (vigs_gem->type != VIGS_GEM_TYPE_SURFACE) {
- drm_gem_object_unreference_unlocked(gem);
- return -ENOENT;
+ if ((args->saf & ~DRM_VIGS_SAF_MASK) != 0) {
+ return -EINVAL;
}
- sfc = vigs_gem_to_vigs_surface(vigs_gem);
-
vigs_gem_reserve(&sfc->gem);
- if (vigs_gem_in_vram(&sfc->gem) && sfc->is_gpu_dirty) {
- vigs_comm_update_vram(vigs_dev->comm,
- sfc->id,
- vigs_gem_offset(vigs_gem));
- sfc->is_gpu_dirty = false;
+ old_saf = vigs_surface_saf(sfc);
+
+ if (vma_data->saf & DRM_VIGS_SAF_READ) {
+ --sfc->num_readers;
}
- vigs_gem_unreserve(&sfc->gem);
+ if (vma_data->saf & DRM_VIGS_SAF_WRITE) {
+ --sfc->num_writers;
+ }
- drm_gem_object_unreference_unlocked(gem);
+ if (args->saf & DRM_VIGS_SAF_WRITE) {
+ ++sfc->num_writers;
+ }
+
+ if (args->saf & DRM_VIGS_SAF_READ) {
+ ++sfc->num_readers;
+
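+ /*
+ * A new reader must see the latest pixels: if the GPU copy is newer,
+ * read it back into VRAM first.
+ */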
+ if (vigs_gem_in_vram(&sfc->gem) && sfc->is_gpu_dirty) {
+ vigs_comm_update_vram(vigs_dev->comm,
+ sfc->id,
+ vigs_gem_offset(&sfc->gem));
+ sfc->is_gpu_dirty = false;
+ }
+ }
+
+ vma_data->saf = args->saf;
+
+ vigs_surface_saf_changed(sfc, old_saf);
+
+ vigs_gem_unreserve(&sfc->gem);
return 0;
}
-int vigs_surface_update_gpu_ioctl(struct drm_device *drm_dev,
- void *data,
- struct drm_file *file_priv)
+int vigs_surface_start_access_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
{
struct vigs_device *vigs_dev = drm_dev->dev_private;
- struct drm_vigs_surface_update_gpu *args = data;
- struct drm_gem_object *gem;
- struct vigs_gem_object *vigs_gem;
- struct vigs_surface *sfc;
+ struct drm_vigs_surface_start_access *args = data;
- gem = drm_gem_object_lookup(drm_dev, file_priv, args->handle);
-
- if (gem == NULL) {
- return -ENOENT;
- }
+ return vigs_mman_access_vma(vigs_dev->mman,
+ args->address,
+ &vigs_surface_start_access,
+ args);
+}
- vigs_gem = gem_to_vigs_gem(gem);
+static int vigs_surface_end_access(void *user_data, void *vma_data_opaque)
+{
+ struct drm_vigs_surface_end_access *args = user_data;
+ struct vigs_vma_data *vma_data = vma_data_opaque;
+ struct vigs_surface *sfc = vma_data->sfc;
- if (vigs_gem->type != VIGS_GEM_TYPE_SURFACE) {
- drm_gem_object_unreference_unlocked(gem);
+ if (!sfc) {
return -ENOENT;
}
- sfc = vigs_gem_to_vigs_surface(vigs_gem);
-
vigs_gem_reserve(&sfc->gem);
- if (vigs_gem_in_vram(&sfc->gem)) {
- vigs_comm_update_gpu(vigs_dev->comm,
- sfc->id,
- sfc->width,
- sfc->height,
- vigs_gem_offset(vigs_gem));
- sfc->is_gpu_dirty = false;
- }
+ vigs_vma_data_end_access(vma_data, args->sync);
vigs_gem_unreserve(&sfc->gem);
- drm_gem_object_unreference_unlocked(gem);
-
return 0;
}
+
+int vigs_surface_end_access_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct vigs_device *vigs_dev = drm_dev->dev_private;
+ struct drm_vigs_surface_end_access *args = data;
+
+ return vigs_mman_access_vma(vigs_dev->mman,
+ args->address,
+ &vigs_surface_end_access,
+ args);
+}
bool is_gpu_dirty;
+ /*
+ * Number of mmap areas (VMAs) that have started read/write
+ * access to this surface.
+ * @{
+ */
+ u32 num_readers;
+ u32 num_writers;
+ /*
+ * @}
+ */
+
+ /*
+ * Number of mmap area writers that ended access asynchronously, i.e.
+ * they're still accounted for in 'num_writers', but they'll be dropped
+ * as soon as the first GPU update operation takes place.
+ */
+ u32 num_pending_writers;
+
+ /*
+ * Specifies that we should not update VRAM on the next 'update_vram'
+ * request. Lasts for one request only.
+ */
+ bool skip_vram_update;
+
/*
* @}
*/
};
+struct vigs_vma_data
+{
+ struct vigs_surface *sfc;
+ u32 saf;
+};
+
+void vigs_vma_data_init(struct vigs_vma_data *vma_data,
+ struct vigs_surface *sfc);
+
+void vigs_vma_data_cleanup(struct vigs_vma_data *vma_data);
+
static inline struct vigs_surface *vigs_gem_to_vigs_surface(struct vigs_gem_object *vigs_gem)
{
return container_of(vigs_gem, struct vigs_surface, gem);
vigsp_surface_format format,
struct vigs_surface **sfc);
+/*
+ * Functions below MUST be accessed between
+ * vigs_gem_reserve/vigs_gem_unreserve.
+ * @{
+ */
+
+bool vigs_surface_need_vram_update(struct vigs_surface *sfc);
+
+bool vigs_surface_need_gpu_update(struct vigs_surface *sfc);
+
+/*
+ * @}
+ */
+
/*
* IOCTLs
* @{
void *data,
struct drm_file *file_priv);
-int vigs_surface_update_vram_ioctl(struct drm_device *drm_dev,
- void *data,
- struct drm_file *file_priv);
+int vigs_surface_start_access_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
-int vigs_surface_update_gpu_ioctl(struct drm_device *drm_dev,
+int vigs_surface_end_access_ioctl(struct drm_device *drm_dev,
void *data,
struct drm_file *file_priv);
/*
* Bump this whenever driver interface changes.
*/
-#define DRM_VIGS_DRIVER_VERSION 7
+#define DRM_VIGS_DRIVER_VERSION 8
+
+/*
+ * Surface access flags.
+ */
+#define DRM_VIGS_SAF_READ 1
+#define DRM_VIGS_SAF_WRITE 2
+#define DRM_VIGS_SAF_MASK 3
struct drm_vigs_get_protocol_version
{
uint32_t handle;
};
-struct drm_vigs_surface_update_vram
+struct drm_vigs_surface_start_access
{
- uint32_t handle;
+ unsigned long address;
+ uint32_t saf;
};
-struct drm_vigs_surface_update_gpu
+struct drm_vigs_surface_end_access
{
- uint32_t handle;
+ unsigned long address;
+ int sync;
};
#define DRM_VIGS_GET_PROTOCOL_VERSION 0x00
#define DRM_VIGS_SURFACE_INFO 0x03
#define DRM_VIGS_EXEC 0x04
#define DRM_VIGS_SURFACE_SET_GPU_DIRTY 0x05
-#define DRM_VIGS_SURFACE_UPDATE_VRAM 0x06
-#define DRM_VIGS_SURFACE_UPDATE_GPU 0x07
+#define DRM_VIGS_SURFACE_START_ACCESS 0x06
+#define DRM_VIGS_SURFACE_END_ACCESS 0x07
#define DRM_IOCTL_VIGS_GET_PROTOCOL_VERSION DRM_IOR(DRM_COMMAND_BASE + \
DRM_VIGS_GET_PROTOCOL_VERSION, struct drm_vigs_get_protocol_version)
DRM_VIGS_EXEC, struct drm_vigs_exec)
#define DRM_IOCTL_VIGS_SURFACE_SET_GPU_DIRTY DRM_IOW(DRM_COMMAND_BASE + \
DRM_VIGS_SURFACE_SET_GPU_DIRTY, struct drm_vigs_surface_set_gpu_dirty)
-#define DRM_IOCTL_VIGS_SURFACE_UPDATE_VRAM DRM_IOW(DRM_COMMAND_BASE + \
- DRM_VIGS_SURFACE_UPDATE_VRAM, struct drm_vigs_surface_update_vram)
-#define DRM_IOCTL_VIGS_SURFACE_UPDATE_GPU DRM_IOW(DRM_COMMAND_BASE + \
- DRM_VIGS_SURFACE_UPDATE_GPU, struct drm_vigs_surface_update_gpu)
+#define DRM_IOCTL_VIGS_SURFACE_START_ACCESS DRM_IOW(DRM_COMMAND_BASE + \
+ DRM_VIGS_SURFACE_START_ACCESS, struct drm_vigs_surface_start_access)
+#define DRM_IOCTL_VIGS_SURFACE_END_ACCESS DRM_IOW(DRM_COMMAND_BASE + \
+ DRM_VIGS_SURFACE_END_ACCESS, struct drm_vigs_surface_end_access)
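+
+/*
+ * A minimal (hypothetical) userspace sketch of the access protocol,
+ * assuming 'fd' is the opened DRM device and 'addr' is the mmap'ed
+ * surface address:
+ *
+ *   struct drm_vigs_surface_start_access start = {
+ *       .address = (unsigned long)addr,
+ *       .saf = DRM_VIGS_SAF_READ | DRM_VIGS_SAF_WRITE,
+ *   };
+ *   ioctl(fd, DRM_IOCTL_VIGS_SURFACE_START_ACCESS, &start);
+ *
+ *   ... CPU access through the mapping ...
+ *
+ *   struct drm_vigs_surface_end_access end = {
+ *       .address = (unsigned long)addr,
+ *       .sync = 1,
+ *   };
+ *   ioctl(fd, DRM_IOCTL_VIGS_SURFACE_END_ACCESS, &end);
+ */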
#endif