struct ttm_placement placement;
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
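+ /* a create command for this object has been queued to the host */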
+ bool created;
};
#define gem_to_virtio_gpu_obj(gobj) \
container_of((gobj), struct virtio_gpu_object, gem_base)
uint32_t *resid);
void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id);
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *bo,
uint32_t resource_id,
uint32_t format,
uint32_t width,
struct virtio_gpu_fence **fence);
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *bo,
struct virtio_gpu_resource_create_3d *rc_3d,
struct virtio_gpu_fence **fence);
void virtio_gpu_ctrl_ack(struct virtqueue *vq);
return PTR_ERR(obj);
virtio_gpu_resource_id_get(vgdev, &resid);
- virtio_gpu_cmd_create_resource(vgdev, resid, format,
+ virtio_gpu_cmd_create_resource(vgdev, obj, resid, format,
mode_cmd.width, mode_cmd.height);
ret = virtio_gpu_object_kmap(obj);
goto fail;
format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888);
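+ /* resolve the virtio_gpu_object so it can be handed to the create command */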
+ obj = gem_to_virtio_gpu_obj(gobj);
virtio_gpu_resource_id_get(vgdev, &resid);
- virtio_gpu_cmd_create_resource(vgdev, resid, format,
+ virtio_gpu_cmd_create_resource(vgdev, obj, resid, format,
args->width, args->height);
/* attach the object to the resource */
obj = &qobj->gem_base;
if (!vgdev->has_virgl_3d) {
- virtio_gpu_cmd_create_resource(vgdev, res_id, rc->format,
+ virtio_gpu_cmd_create_resource(vgdev, qobj, res_id, rc->format,
rc->width, rc->height);
ret = virtio_gpu_object_attach(vgdev, qobj, res_id, NULL);
rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
rc_3d.flags = cpu_to_le32(rc->flags);
- virtio_gpu_cmd_resource_create_3d(vgdev, &rc_3d, NULL);
+ virtio_gpu_cmd_resource_create_3d(vgdev, qobj, &rc_3d, NULL);
ret = virtio_gpu_object_attach(vgdev, qobj, res_id, &fence);
if (ret) {
ttm_eu_backoff_reservation(&ticket, &validate_list);
bo = container_of(tbo, struct virtio_gpu_object, tbo);
vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
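+ /* only send an unref for resources the host was actually told to create */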
- if (bo->hw_res_handle)
+ if (bo->created)
virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
if (bo->pages)
virtio_gpu_object_free_sg_table(bo);
/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *bo,
uint32_t resource_id,
uint32_t format,
uint32_t width,
cmd_p->height = cpu_to_le32(height);
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
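+ /* command is queued, mark the host-side resource as created */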
+ bo->created = true;
}
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *bo,
struct virtio_gpu_resource_create_3d *rc_3d,
struct virtio_gpu_fence **fence)
{
cmd_p->hdr.flags = 0;
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
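+ /* set created on the fenced path too; the command is queued either way */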
+ bo->created = true;
}
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
struct scatterlist *sg;
int si, nents;
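+ /* nothing to attach while the host-side resource does not exist yet */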
+ if (!obj->created)
+ return 0;
+
if (!obj->pages) {
int ret;