static void vigs_device_mman_init_vma(void *user_data,
void *vma_data_opaque,
- struct ttm_buffer_object *bo)
+ struct ttm_buffer_object *bo,
+ bool track_access)
{
struct vigs_vma_data *vma_data = vma_data_opaque;
struct vigs_gem_object *vigs_gem = bo_to_vigs_gem(bo);
return;
}
- vigs_vma_data_init(vma_data, vigs_gem_to_vigs_surface(vigs_gem));
+ vigs_vma_data_init(vma_data,
+ vigs_gem_to_vigs_surface(vigs_gem),
+ track_access);
}
static void vigs_device_mman_cleanup_vma(void *user_data,
return -EINVAL;
}
- return vigs_mman_mmap(vigs_dev->mman, filp, vma);
+ return vigs_mman_mmap(vigs_dev->mman,
+ filp,
+ vma,
+ vigs_dev->track_gem_access);
}
int vigs_device_add_surface(struct vigs_device *vigs_dev,
struct vigs_comm *comm;
struct vigs_fbdev *fbdev;
+
+    /*
+     * A hack we're forced to have in order to tell whether or not
+     * GEM access needs to be tracked in 'vigs_device_mmap'.
+     * current's 'mmap_sem' is write-locked for as long as this flag
+     * is set, so no race can occur.
+     */
+ bool track_gem_access;
};
int vigs_device_init(struct vigs_device *vigs_dev,
DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(VIGS_CREATE_EXECBUFFER, vigs_execbuffer_create_ioctl,
DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIGS_GEM_MAP, vigs_gem_map_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(VIGS_SURFACE_INFO, vigs_surface_info_ioctl,
DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(VIGS_EXEC, vigs_device_exec_ioctl,
if (ret == 0) {
args->size = vigs_gem_size(&execbuffer->gem);
args->handle = handle;
- args->mmap_offset = vigs_gem_mmap_offset(&execbuffer->gem);
}
return ret;
{
}
+int vigs_gem_map_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct vigs_device *vigs_dev = drm_dev->dev_private;
+ struct drm_vigs_gem_map *args = data;
+ struct drm_gem_object *gem;
+ struct vigs_gem_object *vigs_gem;
+ struct mm_struct *mm = current->mm;
+ unsigned long address;
+
+ gem = drm_gem_object_lookup(drm_dev, file_priv, args->handle);
+
+ if (gem == NULL) {
+ return -ENOENT;
+ }
+
+ vigs_gem = gem_to_vigs_gem(gem);
+
+ down_write(&mm->mmap_sem);
+
+    /*
+     * We can't use 'do_mmap' here (like i915, exynos and others do)
+     * because 'do_mmap' takes an offset in bytes, and our offset is
+     * 64-bit (it's a TTM offset), so it can't fit into a 32-bit
+     * variable. 'do_mmap_pgoff' takes a page offset instead, but for
+     * this to work we had to export 'do_mmap_pgoff' ourselves: it's
+     * exported prior to 3.4 and again from 3.5 onwards, yet for some
+     * reason it's static in 3.4.
+     */
+ vigs_dev->track_gem_access = args->track_access;
+ address = do_mmap_pgoff(file_priv->filp, 0, vigs_gem_size(vigs_gem),
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED,
+ vigs_gem_mmap_offset(vigs_gem) >> PAGE_SHIFT);
+ vigs_dev->track_gem_access = false;
+
+ up_write(&mm->mmap_sem);
+
+ drm_gem_object_unreference_unlocked(gem);
+
+    if (IS_ERR((void *)address)) {
+        return PTR_ERR((void *)address);
+    }
+
+ args->address = address;
+
+ return 0;
+}
+
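For reference, a minimal userspace sketch of driving the new mapping path; the ioctl number, request struct and field names are the ones added in this patch, while the header name, helper name and error handling are assumptions:

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include "vigs_drm.h"  /* assumed UAPI header carrying the VIGS ioctls */

    /* Hypothetical helper: map a VIGS GEM object into the current process. */
    static void *map_vigs_gem(int drm_fd, uint32_t handle, int track_access)
    {
        struct drm_vigs_gem_map req = {
            .handle = handle,
            .track_access = track_access,
        };

        if (ioctl(drm_fd, DRM_IOCTL_VIGS_GEM_MAP, &req) < 0) {
            perror("DRM_IOCTL_VIGS_GEM_MAP");
            return NULL;
        }

        /* The kernel has already done the mmap; just use the address. */
        return (void *)req.address;
    }

With track_access == 0 the mapping is registered as both reader and writer once (see vigs_vma_data_init below), so no DRM_VIGS_SURFACE_START_ACCESS/END_ACCESS calls are needed around CPU access.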
int vigs_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *drm_dev,
struct drm_mode_create_dumb *args)
* @}
*/
+/*
+ * IOCTLs
+ * @{
+ */
+
+int vigs_gem_map_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
+
+/*
+ * @}
+ */
+
/*
* Dumb
* @{
int vigs_mman_mmap(struct vigs_mman *mman,
struct file *filp,
- struct vm_area_struct *vma)
+ struct vm_area_struct *vma,
+ bool track_access)
{
struct vigs_mman_vma *vigs_vma;
int ret;
vigs_vma->vm_ops.open = &vigs_ttm_open;
vigs_vma->vm_ops.close = &vigs_ttm_close;
kref_init(&vigs_vma->kref);
- mman->ops->init_vma(mman->user_data, &vigs_vma->data[0], bo);
+ mman->ops->init_vma(mman->user_data,
+ &vigs_vma->data[0],
+ bo,
+ track_access);
vma->vm_ops = &vigs_vma->vm_ops;
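For context (not part of this change): the 'open'/'close' handlers and the kref wired up above are what make the one-shot init/cleanup guarantee described in the header comment below hold. A rough sketch of how that reference counting is assumed to work; apart from 'vigs_ttm_open', 'vigs_ttm_close', 'kref' and 'data[0]', which are visible above, every name here is an assumption, and forwarding to TTM's own vm_ops is omitted:

    static void vigs_mman_vma_release(struct kref *kref)
    {
        struct vigs_mman_vma *vigs_vma =
            container_of(kref, struct vigs_mman_vma, kref);

        /* Last user of the mapping gone: run the one-shot cleanup hook. */
        vigs_vma->mman->ops->cleanup_vma(vigs_vma->mman->user_data,
                                         &vigs_vma->data[0]);
        kfree(vigs_vma);
    }

    static void vigs_ttm_open(struct vm_area_struct *vma)
    {
        struct vigs_mman_vma *vigs_vma =
            container_of(vma->vm_ops, struct vigs_mman_vma, vm_ops);

        /* The VMA was split/copied: just take another reference. */
        kref_get(&vigs_vma->kref);
    }

    static void vigs_ttm_close(struct vm_area_struct *vma)
    {
        struct vigs_mman_vma *vigs_vma =
            container_of(vma->vm_ops, struct vigs_mman_vma, vm_ops);

        kref_put(&vigs_vma->kref, &vigs_mman_vma_release);
    }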
* Per-VMA data init/cleanup. VMA may be opened/closed many times
* as the result of split/copy, but the init/cleanup handlers are called
* only once, i.e. vigs_mman is handling the reference counts.
+ *
+ * current's 'mmap_sem' is locked while calling this.
* @{
*/
void (*init_vma)(void *user_data,
void *vma_data,
- struct ttm_buffer_object *bo);
+ struct ttm_buffer_object *bo,
+ bool track_access);
/*
* current's 'mmap_sem' is locked while calling this.
int vigs_mman_mmap(struct vigs_mman *mman,
struct file *filp,
- struct vm_area_struct *vma);
+ struct vm_area_struct *vma,
+ bool track_access);
/*
* current's 'mmap_sem' is locked while calling 'func'.
*/
void vigs_vma_data_init(struct vigs_vma_data *vma_data,
- struct vigs_surface *sfc)
+ struct vigs_surface *sfc,
+ bool track_access)
{
+ struct vigs_device *vigs_dev = sfc->gem.base.dev->dev_private;
+ u32 old_saf;
+
vma_data->sfc = sfc;
vma_data->saf = 0;
+ vma_data->track_access = track_access;
+
+ if (track_access) {
+ return;
+ }
+
+    /*
+     * If we don't want to track access for this VMA then register it
+     * as both reader and writer up front, for the whole lifetime of
+     * the mapping.
+     */
+
+ vigs_gem_reserve(&sfc->gem);
+
+ old_saf = vigs_surface_saf(sfc);
+
+ ++sfc->num_writers;
+ ++sfc->num_readers;
+
+ if (vigs_gem_in_vram(&sfc->gem) && sfc->is_gpu_dirty) {
+ vigs_comm_update_vram(vigs_dev->comm,
+ sfc->id,
+ vigs_gem_offset(&sfc->gem));
+ sfc->is_gpu_dirty = false;
+ }
+
+ vma_data->saf = DRM_VIGS_SAF_READ | DRM_VIGS_SAF_WRITE;
+
+ vigs_surface_saf_changed(sfc, old_saf);
+
+ vigs_gem_unreserve(&sfc->gem);
}
void vigs_vma_data_cleanup(struct vigs_vma_data *vma_data)
{
vigs_gem_reserve(&vma_data->sfc->gem);
- vigs_vma_data_end_access(vma_data, true);
+    /*
+     * On unmap we sync only when access tracking is enabled.
+     * Otherwise we pass 'false', pretending that the sync will happen
+     * some time later, even though it never will.
+     */
+ vigs_vma_data_end_access(vma_data,
+ vma_data->track_access);
vigs_gem_unreserve(&vma_data->sfc->gem);
}
if (ret == 0) {
args->handle = handle;
args->size = vigs_gem_size(&sfc->gem);
- args->mmap_offset = vigs_gem_mmap_offset(&sfc->gem);
args->id = sfc->id;
}
args->stride = sfc->stride;
args->format = sfc->format;
args->size = vigs_gem_size(vigs_gem);
- args->mmap_offset = vigs_gem_mmap_offset(vigs_gem);
args->id = sfc->id;
drm_gem_object_unreference_unlocked(gem);
return -ENOENT;
}
+ if (!vma_data->track_access) {
+ return 0;
+ }
+
vigs_dev = sfc->gem.base.dev->dev_private;
if ((args->saf & ~DRM_VIGS_SAF_MASK) != 0) {
return -ENOENT;
}
+ if (!vma_data->track_access) {
+ return 0;
+ }
+
vigs_gem_reserve(&sfc->gem);
vigs_vma_data_end_access(vma_data, args->sync);
{
struct vigs_surface *sfc;
u32 saf;
+ bool track_access;
};
void vigs_vma_data_init(struct vigs_vma_data *vma_data,
- struct vigs_surface *sfc);
+ struct vigs_surface *sfc,
+ bool track_access);
void vigs_vma_data_cleanup(struct vigs_vma_data *vma_data);
/*
* Bump this whenever driver interface changes.
*/
-#define DRM_VIGS_DRIVER_VERSION 8
+#define DRM_VIGS_DRIVER_VERSION 9
/*
* Surface access flags.
uint32_t format;
uint32_t handle;
uint32_t size;
- uint64_t mmap_offset;
uint32_t id;
};
{
uint32_t size;
uint32_t handle;
- uint64_t mmap_offset;
+};
+
+struct drm_vigs_gem_map
+{
+ uint32_t handle;
+ int track_access;
+ unsigned long address;
};
struct drm_vigs_surface_info
uint32_t stride;
uint32_t format;
uint32_t size;
- uint64_t mmap_offset;
uint32_t id;
};
#define DRM_VIGS_GET_PROTOCOL_VERSION 0x00
#define DRM_VIGS_CREATE_SURFACE 0x01
#define DRM_VIGS_CREATE_EXECBUFFER 0x02
-#define DRM_VIGS_SURFACE_INFO 0x03
-#define DRM_VIGS_EXEC 0x04
-#define DRM_VIGS_SURFACE_SET_GPU_DIRTY 0x05
-#define DRM_VIGS_SURFACE_START_ACCESS 0x06
-#define DRM_VIGS_SURFACE_END_ACCESS 0x07
+#define DRM_VIGS_GEM_MAP 0x03
+#define DRM_VIGS_SURFACE_INFO 0x04
+#define DRM_VIGS_EXEC 0x05
+#define DRM_VIGS_SURFACE_SET_GPU_DIRTY 0x06
+#define DRM_VIGS_SURFACE_START_ACCESS 0x07
+#define DRM_VIGS_SURFACE_END_ACCESS 0x08
#define DRM_IOCTL_VIGS_GET_PROTOCOL_VERSION DRM_IOR(DRM_COMMAND_BASE + \
DRM_VIGS_GET_PROTOCOL_VERSION, struct drm_vigs_get_protocol_version)
DRM_VIGS_CREATE_SURFACE, struct drm_vigs_create_surface)
#define DRM_IOCTL_VIGS_CREATE_EXECBUFFER DRM_IOWR(DRM_COMMAND_BASE + \
DRM_VIGS_CREATE_EXECBUFFER, struct drm_vigs_create_execbuffer)
+#define DRM_IOCTL_VIGS_GEM_MAP DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_VIGS_GEM_MAP, struct drm_vigs_gem_map)
#define DRM_IOCTL_VIGS_SURFACE_INFO DRM_IOWR(DRM_COMMAND_BASE + \
DRM_VIGS_SURFACE_INFO, struct drm_vigs_surface_info)
#define DRM_IOCTL_VIGS_EXEC DRM_IOW(DRM_COMMAND_BASE + \