/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include <drm/drm_edid.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"
#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		 \
			       + MAX_INLINE_RESP_SIZE)
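
/*
 * Each vbuffer is carved out of a single VBUFFER_SIZE slab object: the
 * struct virtio_gpu_vbuffer header is immediately followed by up to
 * MAX_INLINE_CMD_SIZE bytes of command space and MAX_INLINE_RESP_SIZE
 * bytes of response space, so the common small commands need no extra
 * allocations.
 */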
static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}
void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}
void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL);

	BUG_ON(size > MAX_INLINE_CMD_SIZE ||
	       size < sizeof(struct virtio_gpu_ctrl_hdr));
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}
static struct virtio_gpu_ctrl_hdr *
virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
{
	/* this assumes a vbuf contains a command that starts with a
	 * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
	 * virtqueues.
	 */
	return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
}
static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}
static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}
static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}
static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer **vbuffer_p,
				     int size,
				     virtio_gpu_resp_cb cb)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}
static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kvfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}
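
/*
 * Worker for the control queue: drain completed buffers from the virtqueue
 * under the queue lock, then do the response handling (error reporting,
 * fence signalling, per-command callbacks) and free the vbuffers outside
 * of it.
 */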
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry(entry, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);

		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
			if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
				struct virtio_gpu_ctrl_hdr *cmd;
				cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
				DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
						      le32_to_cpu(resp->type),
						      le32_to_cpu(cmd->type));
			} else
				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		}
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			fence_id = le64_to_cpu(resp->fence_id);
			virtio_gpu_fence_event_process(vgdev, fence_id);
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		if (entry->objs)
			virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
}
void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}
/* Create sg_table from a vmalloc'd buffer. */
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
	int ret, s, i;
	struct sg_table *sgt;
	struct scatterlist *sg;
	struct page *pg;

	if (WARN_ON(!PAGE_ALIGNED(data)))
		return NULL;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	*sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
	ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
	if (ret) {
		kfree(sgt);
		return NULL;
	}

	for_each_sgtable_sg(sgt, sg, i) {
		pg = vmalloc_to_page(data);
		if (!pg) {
			sg_free_table(sgt);
			kfree(sgt);
			return NULL;
		}

		s = min_t(int, PAGE_SIZE, size);
		sg_set_page(sg, pg, s, 0);

		size -= s;
		data += s;
	}

	return sgt;
}
static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf,
				     struct virtio_gpu_fence *fence,
				     int elemcnt,
				     struct scatterlist **sgs,
				     int outcnt,
				     int incnt)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int ret, idx;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		if (fence && vbuf->objs)
			virtio_gpu_array_unlock_resv(vbuf->objs);
		free_vbuf(vgdev, vbuf);
		return -ENODEV;
	}

	if (vgdev->has_indirect)
		elemcnt = 1;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	if (vq->num_free < elemcnt) {
		spin_unlock(&vgdev->ctrlq.qlock);
		virtio_gpu_notify(vgdev);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
		goto again;
	}

	/* now that the position of the vbuf in the virtqueue is known, we can
	 * finally set the fence id
	 */
	if (fence) {
		virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
				      fence);
		if (vbuf->objs) {
			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
			virtio_gpu_array_unlock_resv(vbuf->objs);
		}
	}

	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	WARN_ON(ret);

	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));

	atomic_inc(&vgdev->pending_commands);

	spin_unlock(&vgdev->ctrlq.qlock);

	drm_dev_exit(idx);
	return 0;
}
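
/*
 * Build the scatterlists for one command: the command itself is always an
 * "out" entry, an optional data payload (linear or vmalloc'd) adds more
 * "out" entries, and an optional response buffer adds a single "in" entry.
 */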
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf,
					       struct virtio_gpu_fence *fence)
{
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	struct sg_table *sgt = NULL;
	int elemcnt = 0, outcnt = 0, incnt = 0, ret;

	/* set up vcmd */
	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	elemcnt++;
	sgs[outcnt] = &vcmd;
	outcnt++;

	/* set up vout */
	if (vbuf->data_size) {
		if (is_vmalloc_addr(vbuf->data_buf)) {
			int sg_ents;

			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
					     &sg_ents);
			if (!sgt) {
				if (fence && vbuf->objs)
					virtio_gpu_array_unlock_resv(vbuf->objs);
				return -ENOMEM;
			}

			elemcnt += sg_ents;
			sgs[outcnt] = sgt->sgl;
		} else {
			sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
			elemcnt++;
			sgs[outcnt] = &vout;
		}
		outcnt++;
	}

	/* set up vresp */
	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		elemcnt++;
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

	ret = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
					incnt);

	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}

	return ret;
}
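
/*
 * Queuing a command only adds it to the virtqueue and bumps
 * pending_commands; the host is not kicked until virtio_gpu_notify() runs,
 * so callers can batch several commands per notification.
 */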
void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
{
	bool notify;

	if (!atomic_read(&vgdev->pending_commands))
		return;

	spin_lock(&vgdev->ctrlq.qlock);
	atomic_set(&vgdev->pending_commands, 0);
	notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
	spin_unlock(&vgdev->ctrlq.qlock);

	if (notify)
		virtqueue_notify(vgdev->ctrlq.vq);
}
static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_vbuffer *vbuf)
{
	return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
}
static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int idx, ret, outcnt;
	bool notify;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		free_vbuf(vgdev, vbuf);
		return;
	}

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
					   virtio_gpu_vbuf_ctrl_hdr(vbuf));

		notify = virtqueue_kick_prepare(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (notify)
		virtqueue_notify(vq);

	drm_dev_exit(idx);
}
/* just create gem objects for userspace and long lived objects,
 * just use dma_alloced pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_object_array *objs,
				    struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
	bo->created = true;
}
static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo;

	bo = vbuf->resp_cb_data;
	vbuf->resp_cb_data = NULL;

	virtio_gpu_cleanup_object(bo);
}
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_object *bo)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int ret;

	cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
					virtio_gpu_cmd_unref_cb);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	vbuf->resp_cb_data = bo;
	ret = virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	if (ret < 0)
		virtio_gpu_cleanup_object(bo);
}
void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height,
				   struct virtio_gpu_object_array *objs,
				   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
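
/*
 * When the transport goes through the DMA API, the shmem backing pages
 * must be synced for the device before the host is asked to read from
 * them.
 */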
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint64_t offset,
					uint32_t width, uint32_t height,
					uint32_t x, uint32_t y,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

	if (use_dma_api)
		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
					    shmem->pages, DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}
static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	if (vgdev->capsets) {
		vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
		vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
		vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	} else {
		DRM_ERROR("invalid capset memory.");
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}
static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			/* Copy must occur before is_valid is signalled. */
			smp_wmb();
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up_all(&vgdev->resp_wq);
}
static int virtio_get_edid_block(void *data, u8 *buf,
				 unsigned int block, size_t len)
{
	struct virtio_gpu_resp_edid *resp = data;
	size_t start = block * EDID_LENGTH;

	if (start + len > le32_to_cpu(resp->size))
		return -EINVAL;
	memcpy(buf, resp->edid + start, len);
	return 0;
}
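
/*
 * The response buffer carries the raw EDID blob; drm_do_get_edid() pulls
 * it out block by block through virtio_get_edid_block() above, and the
 * connector's EDID property is refreshed with the result.
 */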
static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_cmd_get_edid *cmd =
		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
	struct virtio_gpu_resp_edid *resp =
		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
	uint32_t scanout = le32_to_cpu(cmd->scanout);
	struct virtio_gpu_output *output;
	struct edid *new_edid, *old_edid;

	if (scanout >= vgdev->num_scanouts)
		return;
	output = vgdev->outputs + scanout;

	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
	drm_connector_update_edid_property(&output->conn, new_edid);

	spin_lock(&vgdev->display_info_lock);
	old_edid = output->edid;
	output->edid = new_edid;
	spin_unlock(&vgdev->display_info_lock);

	kfree(old_edid);
	wake_up(&vgdev->resp_wq);
}
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}
int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}
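
/*
 * Capset contents are cached: the caller immediately gets a cache entry
 * whose is_valid flag is set by virtio_gpu_cmd_capset_cb() once the
 * response arrives, and a concurrent request for the same id/version
 * reuses the entry that is already on the list.
 */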
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	struct virtio_gpu_drv_cap_cache *search_ent;
	void *resp_buf;

	*cache_p = NULL;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	/* Search while under lock in case it was added by another task. */
	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
		if (search_ent->id == vgdev->capsets[idx].id &&
		    search_ent->version == version) {
			*cache_p = search_ent;
			break;
		}
	}
	if (!*cache_p)
		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	if (*cache_p) {
		/* Entry was found, so free everything that was just created. */
		kfree(resp_buf);
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return 0;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}
int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;

	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;

	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
				   GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;

		cmd_p = virtio_gpu_alloc_cmd_resp
			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
			 resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	}

	return 0;
}
void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t context_init, uint32_t nlen,
				   const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	cmd_p->context_init = cpu_to_le32(context_init);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_object_array *objs,
				  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	cmd_p->target = cpu_to_le32(params->target);
	cmd_p->bind = cpu_to_le32(params->bind);
	cmd_p->depth = cpu_to_le32(params->depth);
	cmd_p->array_size = cpu_to_le32(params->array_size);
	cmd_p->last_level = cpu_to_le32(params->last_level);
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);

	bo->created = true;
}
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					uint32_t stride,
					uint32_t layer_stride,
					struct drm_virtgpu_3d_box *box,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);

	if (virtio_gpu_is_shmem(bo) && use_dma_api) {
		struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
					    shmem->pages, DMA_TO_DEVICE);
	}

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);
	cmd_p->stride = cpu_to_le32(stride);
	cmd_p->layer_stride = cpu_to_le32(layer_stride);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  uint32_t stride,
					  uint32_t layer_stride,
					  struct drm_virtgpu_3d_box *box,
					  struct virtio_gpu_object_array *objs,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);
	cmd_p->stride = cpu_to_le32(stride);
	cmd_p->layer_stride = cpu_to_le32(layer_stride);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id,
			   struct virtio_gpu_object_array *objs,
			   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj,
			      struct virtio_gpu_mem_entry *ents,
			      unsigned int nents)
{
	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents, NULL);
}
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}
static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
					    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *obj =
		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
	struct virtio_gpu_resp_resource_uuid *resp =
		(struct virtio_gpu_resp_resource_uuid *)vbuf->resp_buf;
	uint32_t resp_type = le32_to_cpu(resp->hdr.type);

	spin_lock(&vgdev->resource_export_lock);
	WARN_ON(obj->uuid_state != STATE_INITIALIZING);

	if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
	    obj->uuid_state == STATE_INITIALIZING) {
		import_uuid(&obj->uuid, resp->uuid);
		obj->uuid_state = STATE_OK;
	} else {
		obj->uuid_state = STATE_ERR;
	}
	spin_unlock(&vgdev->resource_export_lock);

	wake_up_all(&vgdev->resp_wq);
}
int
virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_resource_assign_uuid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_resp_resource_uuid *resp_buf;

	resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
	if (!resp_buf) {
		spin_lock(&vgdev->resource_export_lock);
		bo->uuid_state = STATE_ERR;
		spin_unlock(&vgdev->resource_export_lock);
		virtio_gpu_array_put_free(objs);
		return -ENOMEM;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, virtio_gpu_cmd_resource_uuid_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_resource_uuid), resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	vbuf->objs = objs;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}
static void virtio_gpu_cmd_resource_map_cb(struct virtio_gpu_device *vgdev,
					   struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo =
		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
	struct virtio_gpu_resp_map_info *resp =
		(struct virtio_gpu_resp_map_info *)vbuf->resp_buf;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	uint32_t resp_type = le32_to_cpu(resp->hdr.type);

	spin_lock(&vgdev->host_visible_lock);

	if (resp_type == VIRTIO_GPU_RESP_OK_MAP_INFO) {
		vram->map_info = resp->map_info;
		vram->map_state = STATE_OK;
	} else {
		vram->map_state = STATE_ERR;
	}

	spin_unlock(&vgdev->host_visible_lock);
	wake_up_all(&vgdev->resp_wq);
}
int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
		       struct virtio_gpu_object_array *objs, uint64_t offset)
{
	struct virtio_gpu_resource_map_blob *cmd_p;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_resp_map_info *resp_buf;

	resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, virtio_gpu_cmd_resource_map_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_map_info), resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	vbuf->objs = objs;

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}
void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
			  struct virtio_gpu_object *bo)
{
	struct virtio_gpu_resource_unmap_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void
virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_mem_entry *ents,
				    uint32_t nents)
{
	struct virtio_gpu_resource_create_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB);
	cmd_p->hdr.ctx_id = cpu_to_le32(params->ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->blob_mem = cpu_to_le32(params->blob_mem);
	cmd_p->blob_flags = cpu_to_le32(params->blob_flags);
	cmd_p->blob_id = cpu_to_le64(params->blob_id);
	cmd_p->size = cpu_to_le64(params->size);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	bo->created = true;
}
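
/*
 * Blob scanouts carry their layout with the command: the format, the
 * overall framebuffer size and the per-plane strides/offsets are taken
 * from the drm_framebuffer rather than from a host-side resource layout.
 */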
void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
				     uint32_t scanout_id,
				     struct virtio_gpu_object *bo,
				     struct drm_framebuffer *fb,
				     uint32_t width, uint32_t height,
				     uint32_t x, uint32_t y)
{
	uint32_t i;
	struct virtio_gpu_set_scanout_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	uint32_t format = virtio_gpu_translate_format(fb->format->format);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);

	cmd_p->format = cpu_to_le32(format);
	cmd_p->width = cpu_to_le32(fb->width);
	cmd_p->height = cpu_to_le32(fb->height);

	for (i = 0; i < 4; i++) {
		cmd_p->strides[i] = cpu_to_le32(fb->pitches[i]);
		cmd_p->offsets[i] = cpu_to_le32(fb->offsets[i]);
	}

	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}