struct ttm_placement placement;
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
+
+ uint32_t width, height, format;
};
#define gem_to_virtio_gpu_obj(gobj) \
container_of((gobj), struct virtio_gpu_object, gem_base)
/* virtio_ioctl.c */
-#define DRM_VIRTIO_NUM_IOCTLS 10
+#define DRM_VIRTIO_NUM_IOCTLS 11
extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
/* virtio_gem.c */
void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj);
int virtio_gpu_gem_init(struct virtio_gpu_device *vgdev);
void virtio_gpu_gem_fini(struct virtio_gpu_device *vgdev);
-int virtio_gpu_gem_create(struct drm_file *file,
- struct drm_device *dev,
- uint64_t size,
- struct drm_gem_object **obj_p,
+int virtio_gpu_gem_create(struct drm_file *file, struct drm_device *dev,
+ uint64_t size, uint32_t width, uint32_t height,
+ uint32_t format, struct drm_gem_object **obj_p,
uint32_t *handle_p);
int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
struct drm_file *file);
void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
				 struct drm_file *file);
struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
size_t size, bool kernel,
- bool pinned);
+ bool pinned, uint32_t width,
+ uint32_t height,
+ uint32_t format);
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
/* virtio_gpu_object */
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
unsigned long size, bool kernel, bool pinned,
+ uint32_t width, uint32_t height, uint32_t format,
struct virtio_gpu_object **bo_ptr);
int virtio_gpu_object_kmap(struct virtio_gpu_object *bo, void **ptr);
int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
				   struct virtio_gpu_object *bo);
}
size = mode_cmd.pitches[0] * mode_cmd.height;
- obj = virtio_gpu_alloc_object(dev, size, false, true);
+ obj = virtio_gpu_alloc_object(dev, size, false, true, mode_cmd.width,
+ mode_cmd.height, format);
if (IS_ERR(obj))
return PTR_ERR(obj);
struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
size_t size, bool kernel,
- bool pinned)
+ bool pinned, uint32_t width,
+ uint32_t height,
+ uint32_t format)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_object *obj;
int ret;
- ret = virtio_gpu_object_create(vgdev, size, kernel, pinned, &obj);
+ ret = virtio_gpu_object_create(vgdev, size, kernel, pinned, width,
+ height, format, &obj);
if (ret)
return ERR_PTR(ret);
return obj;
}
-int virtio_gpu_gem_create(struct drm_file *file,
- struct drm_device *dev,
- uint64_t size,
- struct drm_gem_object **obj_p,
+int virtio_gpu_gem_create(struct drm_file *file, struct drm_device *dev,
+ uint64_t size, uint32_t width, uint32_t height,
+ uint32_t format, struct drm_gem_object **obj_p,
uint32_t *handle_p)
{
struct virtio_gpu_object *obj;
int ret;
u32 handle;
- obj = virtio_gpu_alloc_object(dev, size, false, false);
+ obj = virtio_gpu_alloc_object(dev, size, false, false, width, height,
+ format);
if (IS_ERR(obj))
return PTR_ERR(obj);
args->size = pitch * args->height;
args->size = ALIGN(args->size, PAGE_SIZE);
- ret = virtio_gpu_gem_create(file_priv, dev, args->size, &gobj,
- &args->handle);
+	ret = virtio_gpu_gem_create(file_priv, dev, args->size, args->width,
+				    args->height,
+				    VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM,
+				    &gobj, &args->handle);
if (ret)
goto fail;
if (size == 0)
size = PAGE_SIZE;
- qobj = virtio_gpu_alloc_object(dev, size, false, false);
+ qobj = virtio_gpu_alloc_object(dev, size, false, false, rc->width,
+ rc->height, rc->format);
if (IS_ERR(qobj)) {
ret = PTR_ERR(qobj);
goto fail_id;
return 0;
}
+static int virtio_gpu_brutal_hack(struct drm_device *dev, void *data,
+				  struct drm_file *file)
+{
+	struct drm_virtgpu_brutal_hack *bh = data;
+	struct drm_gem_object *gobj = NULL;
+	struct virtio_gpu_object *qobj = NULL;
+
+	gobj = drm_gem_object_lookup(dev, file, bh->bo_handle);
+	if (gobj == NULL)
+		return -ENOENT;
+
+	qobj = gem_to_virtio_gpu_obj(gobj);
+
+	bh->width = qobj->width;
+	bh->height = qobj->height;
+	/* Every format handled below is 32bpp, so pitch is width * 4. */
+	bh->pitch = qobj->width * 4;
+	/* Map the virtio-gpu format back to the matching DRM fourcc. */
+	switch (qobj->format) {
+	case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
+		bh->format = DRM_FORMAT_XRGB8888;
+		break;
+	case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
+		bh->format = DRM_FORMAT_ARGB8888;
+		break;
+	case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
+		bh->format = DRM_FORMAT_BGRX8888;
+		break;
+	case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
+		bh->format = DRM_FORMAT_BGRA8888;
+		break;
+	default:
+		/* Unknown to this hack: report 0 and let userspace cope. */
+		bh->format = 0;
+		break;
+	}
+
+	drm_gem_object_unreference_unlocked(gobj);
+	return 0;
+}
+
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+
+ DRM_IOCTL_DEF_DRV(VIRTGPU_BRUTAL_HACK, virtio_gpu_brutal_hack,
+ DRM_RENDER_ALLOW),
};
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
unsigned long size, bool kernel, bool pinned,
+ uint32_t width, uint32_t height, uint32_t format,
struct virtio_gpu_object **bo_ptr)
{
struct virtio_gpu_object *bo;
if (ret != 0)
return ret;
+ bo->width = width;
+ bo->height = height;
+ bo->format = format;
+
*bo_ptr = bo;
return 0;
}
#define DRM_VIRTGPU_TRANSFER_TO_HOST 0x07
#define DRM_VIRTGPU_WAIT 0x08
#define DRM_VIRTGPU_GET_CAPS 0x09
+#define DRM_VIRTGPU_BRUTAL_HACK 0x0A
struct drm_virtgpu_map {
uint64_t offset; /* use for mmap system call */
uint32_t pad;
};
+struct drm_virtgpu_brutal_hack {
+	__u32 bo_handle;	/* in: GEM handle of the BO to query */
+	__u32 width;		/* out: resource width in pixels */
+	__u32 height;		/* out: resource height in pixels */
+	__u32 pitch;		/* out: bytes per scanline (width * 4) */
+	__u32 format;		/* out: DRM fourcc, 0 if not recognised */
+};
+
#define DRM_IOCTL_VIRTGPU_MAP \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
struct drm_virtgpu_get_caps)
+#define DRM_IOCTL_VIRTGPU_BRUTAL_HACK \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_BRUTAL_HACK, \
+ struct drm_virtgpu_brutal_hack)
+
#endif
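
For reference, here is a minimal userspace sketch of driving the new ioctl. This is a hypothetical example, not part of the patch: it assumes libdrm's drmIoctl(), the patched virtgpu_drm.h above, and a BO handle obtained earlier (e.g. via DRM_IOCTL_MODE_CREATE_DUMB); the helper name query_bo_metadata is made up for illustration.

#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>
#include "virtgpu_drm.h"	/* the uapi header patched above */

/* Query width/height/pitch/format of an existing virtio-gpu BO. */
static int query_bo_metadata(int drm_fd, uint32_t bo_handle)
{
	struct drm_virtgpu_brutal_hack bh = {
		.bo_handle = bo_handle,	/* in; all other fields are out */
	};

	if (drmIoctl(drm_fd, DRM_IOCTL_VIRTGPU_BRUTAL_HACK, &bh) < 0)
		return -1;

	printf("bo %u: %ux%u, pitch %u, fourcc 0x%08x\n",
	       bo_handle, bh.width, bh.height, bh.pitch, bh.format);
	return bh.format ? 0 : -1;	/* format 0 == unknown to the hack */
}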