struct vigsp_cmd_copy_request *copy;
struct vigsp_cmd_solid_fill_request *solid_fill;
struct vigsp_cmd_ga_copy_request *ga_copy;
+ struct vigsp_cmd_convert_request *convert;
void *data;
};
case vigsp_cmd_ga_copy:
*num_buffers += 2;
break;
+ case vigsp_cmd_convert:
+ *num_buffers += 2;
+ break;
default:
break;
}
++*num_buffers;
break;
+ case vigsp_cmd_convert:
+ ret = vigs_execbuffer_validate_buffer(vigs_dev,
+ &(*buffers)[*num_buffers],
+ list,
+ request.convert->src_id,
+ request_header->cmd,
+ 0,
+ request.data);
+
+ if (ret != 0) {
+ goto fail2;
+ }
+
+ ++*num_buffers;
+
+ ret = vigs_execbuffer_validate_buffer(vigs_dev,
+ &(*buffers)[*num_buffers],
+ list,
+ request.convert->dst_id,
+ request_header->cmd,
+ 1,
+ request.data);
+
+ if (ret != 0) {
+ goto fail2;
+ }
+
+ ++*num_buffers;
+
+ break;
default:
break;
}
}
}
break;
+ case vigsp_cmd_convert:
+ if (buffers[i].which && vigs_gem_in_vram(&sfc->gem)) {
+ sfc->is_gpu_dirty = true;
+ }
+ break;
default:
break;
}
#include "vigs_device.h"
#include "vigs_comm.h"
#include "vigs_mman.h"
+#include "vigs_execbuffer.h"
+#include "vigs_file.h"
#include <drm/vigs_drm.h>
/*
&vigs_surface_end_access,
args);
}
+
+/*
+ * DRM_VIGS_SURFACE_CONVERT ioctl handler: asks the host to convert the
+ * contents of one surface into another by submitting a single
+ * vigsp_cmd_convert command through a per-file execbuffer.
+ *
+ * Only same-format conversion (i.e. y-invert) is supported for now; a
+ * request with src_format != dst_format is rejected with -EINVAL.
+ *
+ * Returns 0 on success or a negative errno. Both surface GEM references
+ * taken here are dropped on every exit path via the "out" label.
+ */
+int vigs_surface_convert_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct vigs_device *vigs_dev = drm_dev->dev_private;
+ struct vigs_file *vigs_file = file_priv->driver_priv;
+ struct drm_vigs_surface_convert *args = data;
+ struct drm_gem_object *gem;
+ struct vigs_gem_object *vigs_gem;
+ struct vigs_surface *src_sfc = NULL, *dst_sfc = NULL;
+ struct vigsp_cmd_batch_header *batch_header;
+ struct vigsp_cmd_request_header *convert_request_header;
+ struct vigsp_cmd_convert_request *convert_request;
+ int ret;
+
+ /*
+ * Currently we support only y-invert operation. Format conversion still
+ * needs to be implemented.
+ */
+ if (args->src_format != args->dst_format) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * src_sfc: resolve the source handle to a surface GEM. The lookup takes
+ * a reference that is released at "out". A handle that exists but is
+ * not a surface is reported as -ENOENT, same as a missing handle.
+ */
+ gem = drm_gem_object_lookup(drm_dev, file_priv, args->src_handle);
+
+ if (gem == NULL) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ vigs_gem = gem_to_vigs_gem(gem);
+
+ if (vigs_gem->type != VIGS_GEM_TYPE_SURFACE) {
+ drm_gem_object_unreference_unlocked(gem);
+ ret = -ENOENT;
+ goto out;
+ }
+
+ src_sfc = vigs_gem_to_vigs_surface(vigs_gem);
+
+ /* dst_sfc: same lookup and type check for the destination handle. */
+ gem = drm_gem_object_lookup(drm_dev, file_priv, args->dst_handle);
+
+ if (gem == NULL) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ vigs_gem = gem_to_vigs_gem(gem);
+
+ if (vigs_gem->type != VIGS_GEM_TYPE_SURFACE) {
+ drm_gem_object_unreference_unlocked(gem);
+ ret = -ENOENT;
+ goto out;
+ }
+
+ dst_sfc = vigs_gem_to_vigs_surface(vigs_gem);
+
+ /*
+ * Flush pending CPU-side contents of the source to the GPU before
+ * converting, so the host reads up-to-date pixels. The gem must be
+ * reserved around the dirty check/update. The destination is not
+ * flushed — presumably because it is fully overwritten by the convert;
+ * its gpu-dirty marking is handled in the execbuffer post-processing
+ * path (see the vigsp_cmd_convert case there).
+ */
+ vigs_gem_reserve(&src_sfc->gem);
+
+ if (vigs_gem_in_vram(&src_sfc->gem) &&
+ vigs_surface_need_gpu_update(src_sfc)) {
+ vigs_comm_update_gpu(vigs_dev->comm,
+ src_sfc->id,
+ src_sfc->width,
+ src_sfc->height,
+ vigs_gem_offset(&src_sfc->gem));
+ src_sfc->is_gpu_dirty = false;
+ }
+
+ vigs_gem_unreserve(&src_sfc->gem);
+
+ /* The per-file execbuffer and its contents are guarded by this mutex. */
+ mutex_lock(&vigs_file->mutex);
+
+ /*
+ * Lazily create (and permanently kmap) a small per-file execbuffer on
+ * first use; it is reused for subsequent convert ioctls on this fd.
+ */
+ if (vigs_file->execbuffer == NULL) {
+ ret = vigs_execbuffer_create(vigs_dev,
+ 4096,
+ true,
+ &vigs_file->execbuffer);
+
+ if (ret != 0) {
+ DRM_ERROR("unable to create execbuffer\n");
+
+ goto out_unlock;
+ }
+
+ vigs_gem_reserve(&vigs_file->execbuffer->gem);
+
+ ret = vigs_gem_kmap(&vigs_file->execbuffer->gem);
+
+ vigs_gem_unreserve(&vigs_file->execbuffer->gem);
+
+ if (ret != 0) {
+ DRM_ERROR("unable to kmap execbuffer\n");
+
+ /* Drop the creation reference and forget the half-built buffer. */
+ drm_gem_object_unreference_unlocked(&vigs_file->execbuffer->gem.base);
+ vigs_file->execbuffer = NULL;
+
+ goto out_unlock;
+ }
+ }
+
+ /*
+ * Build the command in place: batch header, then one request header,
+ * then the convert request payload.
+ */
+ batch_header = vigs_file->execbuffer->gem.kptr;
+ convert_request_header = (struct vigsp_cmd_request_header *)(batch_header + 1);
+ convert_request = (struct vigsp_cmd_convert_request *)(convert_request_header + 1);
+
+ /* fence_seq 0: no fence is attached to this batch — TODO confirm. */
+ batch_header->fence_seq = 0;
+ /* Size of the payload following the batch header (request header + body). */
+ batch_header->size = sizeof(*convert_request_header) +
+ sizeof(*convert_request);
+
+ convert_request_header->cmd = vigsp_cmd_convert;
+ convert_request_header->size = sizeof(*convert_request);
+
+ convert_request->src_id = src_sfc->id;
+ convert_request->src_format = args->src_format;
+ convert_request->dst_id = dst_sfc->id;
+ convert_request->dst_format = args->dst_format;
+ convert_request->y_invert = args->y_invert;
+
+ /* Validate and submit the batch to the host synchronously. */
+ ret = vigs_comm_exec_checked(vigs_dev->comm, vigs_file->execbuffer);
+
+out_unlock:
+ mutex_unlock(&vigs_file->mutex);
+
+out:
+ /* Release the lookup references taken above, if any. */
+ if (src_sfc) {
+ drm_gem_object_unreference_unlocked(&src_sfc->gem.base);
+ }
+
+ if (dst_sfc) {
+ drm_gem_object_unreference_unlocked(&dst_sfc->gem.base);
+ }
+
+ return ret;
+}
int sync;
};
+/*
+ * Argument block for DRM_IOCTL_VIGS_SURFACE_CONVERT (write-only ioctl).
+ * Converts the contents of the source surface into the destination
+ * surface. Only src_format == dst_format (y-invert) is supported by the
+ * current kernel implementation; mismatched formats yield -EINVAL.
+ * Field layout is userspace ABI — do not reorder or resize.
+ */
+struct drm_vigs_surface_convert
+{
+ uint32_t src_handle; /* GEM handle of the source surface */
+ uint32_t src_format; /* source pixel format (vigsp format id — TODO confirm enum) */
+ uint32_t dst_handle; /* GEM handle of the destination surface */
+ uint32_t dst_format; /* destination pixel format; must equal src_format for now */
+ int y_invert; /* non-zero to flip the image vertically during the copy */
+};
+
struct drm_vigs_create_fence
{
int send;
#define DRM_VIGS_DP_CREATE_SURFACE 0x20
#define DRM_VIGS_DP_OPEN_SURFACE 0x21
+#define DRM_VIGS_SURFACE_CONVERT 0x22
+
#define DRM_IOCTL_VIGS_GET_PROTOCOL_VERSION DRM_IOR(DRM_COMMAND_BASE + \
DRM_VIGS_GET_PROTOCOL_VERSION, struct drm_vigs_get_protocol_version)
#define DRM_IOCTL_VIGS_CREATE_SURFACE DRM_IOWR(DRM_COMMAND_BASE + \
DRM_VIGS_SURFACE_START_ACCESS, struct drm_vigs_surface_start_access)
#define DRM_IOCTL_VIGS_SURFACE_END_ACCESS DRM_IOW(DRM_COMMAND_BASE + \
DRM_VIGS_SURFACE_END_ACCESS, struct drm_vigs_surface_end_access)
+#define DRM_IOCTL_VIGS_SURFACE_CONVERT DRM_IOW(DRM_COMMAND_BASE + \
+ DRM_VIGS_SURFACE_CONVERT, struct drm_vigs_surface_convert)
#define DRM_IOCTL_VIGS_CREATE_FENCE DRM_IOWR(DRM_COMMAND_BASE + \
DRM_VIGS_CREATE_FENCE, struct drm_vigs_create_fence)
#define DRM_IOCTL_VIGS_FENCE_WAIT DRM_IOW(DRM_COMMAND_BASE + \