/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
                               + MAX_INLINE_CMD_SIZE             \
                               + MAX_INLINE_RESP_SIZE)

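/*
 * A vbuffer is allocated as a single slab object: the struct itself,
 * followed by room for an inline command (MAX_INLINE_CMD_SIZE) and an
 * inline response (MAX_INLINE_RESP_SIZE).  Responses larger than the
 * inline space must be passed in by the caller as a separate buffer.
 */
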
static void convert_to_hw_box(struct virtio_gpu_box *dst,
                              const struct drm_virtgpu_3d_box *src)
{
        dst->x = cpu_to_le32(src->x);
        dst->y = cpu_to_le32(src->y);
        dst->z = cpu_to_le32(src->z);
        dst->w = cpu_to_le32(src->w);
        dst->h = cpu_to_le32(src->h);
        dst->d = cpu_to_le32(src->d);
}

void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;

        schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;

        schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
        vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
                                         VBUFFER_SIZE,
                                         __alignof__(struct virtio_gpu_vbuffer),
                                         0, NULL);
        if (!vgdev->vbufs)
                return -ENOMEM;
        return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
        kmem_cache_destroy(vgdev->vbufs);
        vgdev->vbufs = NULL;
}

static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
                    int size, int resp_size, void *resp_buf,
                    virtio_gpu_resp_cb resp_cb)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL);

        BUG_ON(size > MAX_INLINE_CMD_SIZE ||
               size < sizeof(struct virtio_gpu_ctrl_hdr));
        vbuf->buf = (void *)vbuf + sizeof(*vbuf);
        vbuf->size = size;

        vbuf->resp_cb = resp_cb;
        vbuf->resp_size = resp_size;
        if (resp_size <= MAX_INLINE_RESP_SIZE)
                vbuf->resp_buf = (void *)vbuf->buf + size;
        else
                vbuf->resp_buf = resp_buf;
        BUG_ON(!vbuf->resp_buf);
        return vbuf;
}

static struct virtio_gpu_ctrl_hdr *
virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
{
        /* this assumes a vbuf contains a command that starts with a
         * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
         * virtqueues.
         */
        return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
                        struct virtio_gpu_vbuffer **vbuffer_p)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf
                (vgdev, sizeof(struct virtio_gpu_update_cursor),
                 0, NULL, NULL);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
                                       virtio_gpu_resp_cb cb,
                                       struct virtio_gpu_vbuffer **vbuffer_p,
                                       int cmd_size, int resp_size,
                                       void *resp_buf)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
                                   resp_size, resp_buf, cb);
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_command *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_vbuffer **vbuffer_p,
                                  int size)
{
        return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
                                         sizeof(struct virtio_gpu_ctrl_hdr),
                                         NULL);
}

static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
                                     struct virtio_gpu_vbuffer **vbuffer_p,
                                     int size,
                                     virtio_gpu_resp_cb cb)
{
        return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
                                         sizeof(struct virtio_gpu_ctrl_hdr),
                                         NULL);
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
                      struct virtio_gpu_vbuffer *vbuf)
{
        if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
                kfree(vbuf->resp_buf);
        kvfree(vbuf->data_buf);
        kmem_cache_free(vgdev->vbufs, vbuf);
}

static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
        struct virtio_gpu_vbuffer *vbuf;
        unsigned int len;
        int freed = 0;

        while ((vbuf = virtqueue_get_buf(vq, &len))) {
                list_add_tail(&vbuf->list, reclaim_list);
                freed++;
        }
        if (freed == 0)
                DRM_DEBUG("Huh? zero vbufs reclaimed");
}

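/*
 * Work func for the ctrl virtqueue: drain completed buffers, log error
 * responses, signal fences for fenced commands, run per-command response
 * callbacks, then release the vbuffers.
 */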
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             ctrlq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;
        struct virtio_gpu_ctrl_hdr *resp;
        u64 fence_id;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->ctrlq.qlock);
        do {
                virtqueue_disable_cb(vgdev->ctrlq.vq);
                reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
        } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
        spin_unlock(&vgdev->ctrlq.qlock);

        list_for_each_entry(entry, &reclaim_list, list) {
                resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

                trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);

                if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
                        if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
                                struct virtio_gpu_ctrl_hdr *cmd;

                                cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
                                DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
                                                      le32_to_cpu(resp->type),
                                                      le32_to_cpu(cmd->type));
                        } else
                                DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
                }
                if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
                        fence_id = le64_to_cpu(resp->fence_id);
                        virtio_gpu_fence_event_process(vgdev, fence_id);
                }
                if (entry->resp_cb)
                        entry->resp_cb(vgdev, entry);
        }
        wake_up(&vgdev->ctrlq.ack_queue);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                if (entry->objs)
                        virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             cursorq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->cursorq.qlock);
        do {
                virtqueue_disable_cb(vgdev->cursorq.vq);
                reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
        } while (!virtqueue_enable_cb(vgdev->cursorq.vq));
        spin_unlock(&vgdev->cursorq.qlock);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
        wake_up(&vgdev->cursorq.ack_queue);
}

/* Create sg_table from a vmalloc'd buffer. */
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
        int ret, s, i;
        struct sg_table *sgt;
        struct scatterlist *sg;
        struct page *pg;

        if (WARN_ON(!PAGE_ALIGNED(data)))
                return NULL;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return NULL;

        *sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
        ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
        if (ret) {
                kfree(sgt);
                return NULL;
        }

        for_each_sgtable_sg(sgt, sg, i) {
                pg = vmalloc_to_page(data);
                if (!pg) {
                        sg_free_table(sgt);
                        kfree(sgt);
                        return NULL;
                }

                s = min_t(int, PAGE_SIZE, size);
                sg_set_page(sg, pg, s, 0);

                size -= s;
                data += s;
        }

        return sgt;
}

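/*
 * Add the prepared scatterlists to the ctrl virtqueue.  If the queue is
 * full, kick the host and sleep until enough descriptors have been
 * reclaimed.  The fence is emitted only once the buffer's position in the
 * ring is known, so fence ids match submission order.
 */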
static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
                                     struct virtio_gpu_vbuffer *vbuf,
                                     struct virtio_gpu_fence *fence,
                                     int elemcnt,
                                     struct scatterlist **sgs,
                                     int outcnt,
                                     int incnt)
{
        struct virtqueue *vq = vgdev->ctrlq.vq;
        int ret, idx;

        if (!drm_dev_enter(vgdev->ddev, &idx)) {
                if (fence && vbuf->objs)
                        virtio_gpu_array_unlock_resv(vbuf->objs);
                free_vbuf(vgdev, vbuf);
                return -ENODEV;
        }

        if (vgdev->has_indirect)
                elemcnt = 1;

again:
        spin_lock(&vgdev->ctrlq.qlock);

        if (vq->num_free < elemcnt) {
                spin_unlock(&vgdev->ctrlq.qlock);
                virtio_gpu_notify(vgdev);
                wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
                goto again;
        }

        /* now that the position of the vbuf in the virtqueue is known, we can
         * finally set the fence id
         */
        if (fence) {
                virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
                                      fence);
                if (vbuf->objs) {
                        virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
                        virtio_gpu_array_unlock_resv(vbuf->objs);
                }
        }

        ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
        WARN_ON(ret);

        trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));

        atomic_inc(&vgdev->pending_commands);

        spin_unlock(&vgdev->ctrlq.qlock);

        drm_dev_exit(idx);
        return 0;
}

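/*
 * Scatterlist layout for a command: one OUT entry for the command itself,
 * an optional OUT entry (or an sg_table for vmalloc'd payloads) for the
 * data, and an optional IN entry for the response.
 */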
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf,
                                               struct virtio_gpu_fence *fence)
{
        struct scatterlist *sgs[3], vcmd, vout, vresp;
        struct sg_table *sgt = NULL;
        int elemcnt = 0, outcnt = 0, incnt = 0, ret;

        /* set up vcmd */
        sg_init_one(&vcmd, vbuf->buf, vbuf->size);
        elemcnt++;
        sgs[outcnt] = &vcmd;
        outcnt++;

        /* set up vout */
        if (vbuf->data_size) {
                if (is_vmalloc_addr(vbuf->data_buf)) {
                        int sg_ents;

                        sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
                                             &sg_ents);
                        if (!sgt) {
                                if (fence && vbuf->objs)
                                        virtio_gpu_array_unlock_resv(vbuf->objs);
                                return -ENOMEM;
                        }

                        elemcnt += sg_ents;
                        sgs[outcnt] = sgt->sgl;
                } else {
                        sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
                        elemcnt++;
                        sgs[outcnt] = &vout;
                }
                outcnt++;
        }

        /* set up vresp */
        if (vbuf->resp_size) {
                sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
                elemcnt++;
                sgs[outcnt + incnt] = &vresp;
                incnt++;
        }

        ret = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
                                        incnt);

        if (sgt) {
                sg_free_table(sgt);
                kfree(sgt);
        }

        return ret;
}

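/*
 * Commands are queued without kicking the host; virtio_gpu_notify() sends
 * a single kick for everything batched since the last notify, so a burst
 * of commands costs only one doorbell.
 */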
void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
{
        bool notify;

        if (!atomic_read(&vgdev->pending_commands))
                return;

        spin_lock(&vgdev->ctrlq.qlock);
        atomic_set(&vgdev->pending_commands, 0);
        notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
        spin_unlock(&vgdev->ctrlq.qlock);

        if (notify)
                virtqueue_notify(vgdev->ctrlq.vq);
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                        struct virtio_gpu_vbuffer *vbuf)
{
        return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
}

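/*
 * The cursor virtqueue is not batched: each cursor update is added and
 * the host is notified right away if a kick is needed.
 */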
static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_vbuffer *vbuf)
{
        struct virtqueue *vq = vgdev->cursorq.vq;
        struct scatterlist *sgs[1], ccmd;
        int idx, ret, outcnt;
        bool notify;

        if (!drm_dev_enter(vgdev->ddev, &idx)) {
                free_vbuf(vgdev, vbuf);
                return;
        }

        sg_init_one(&ccmd, vbuf->buf, vbuf->size);
        sgs[0] = &ccmd;
        outcnt = 1;

        spin_lock(&vgdev->cursorq.qlock);
retry:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                spin_unlock(&vgdev->cursorq.qlock);
                wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
                spin_lock(&vgdev->cursorq.qlock);
                goto retry;
        } else {
                trace_virtio_gpu_cmd_queue(vq,
                                           virtio_gpu_vbuf_ctrl_hdr(vbuf));

                notify = virtqueue_kick_prepare(vq);
        }

        spin_unlock(&vgdev->cursorq.qlock);

        if (notify)
                virtqueue_notify(vq);

        drm_dev_exit(idx);
}

/* just create gem objects for userspace and long lived objects,
 * just use dma_alloced pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_object *bo,
                                    struct virtio_gpu_object_params *params,
                                    struct virtio_gpu_object_array *objs,
                                    struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_resource_create_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->format = cpu_to_le32(params->format);
        cmd_p->width = cpu_to_le32(params->width);
        cmd_p->height = cpu_to_le32(params->height);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
        bo->created = true;
}

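/*
 * Resource unref completes asynchronously: the object is cleaned up in
 * the response callback, once the host is done with the resource.
 */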
static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_object *bo;

        bo = vbuf->resp_cb_data;
        vbuf->resp_cb_data = NULL;

        virtio_gpu_cleanup_object(bo);
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
                                   struct virtio_gpu_object *bo)
{
        struct virtio_gpu_resource_unref *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        int ret;

        cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
                                        virtio_gpu_cmd_unref_cb);
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

        vbuf->resp_cb_data = bo;
        ret = virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        if (ret < 0)
                virtio_gpu_cleanup_object(bo);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
                                uint32_t scanout_id, uint32_t resource_id,
                                uint32_t width, uint32_t height,
                                uint32_t x, uint32_t y)
{
        struct virtio_gpu_set_scanout *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->scanout_id = cpu_to_le32(scanout_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id,
                                   uint32_t x, uint32_t y,
                                   uint32_t width, uint32_t height,
                                   struct virtio_gpu_object_array *objs,
                                   struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_resource_flush *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
                                        uint64_t offset,
                                        uint32_t width, uint32_t height,
                                        uint32_t x, uint32_t y,
                                        struct virtio_gpu_object_array *objs,
                                        struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_transfer_to_host_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
        struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

        if (use_dma_api)
                dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
                                            shmem->pages, DMA_TO_DEVICE);

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
                                       uint32_t resource_id,
                                       struct virtio_gpu_mem_entry *ents,
                                       uint32_t nents,
                                       struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_resource_attach_backing *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->nr_entries = cpu_to_le32(nents);

        vbuf->data_buf = ents;
        vbuf->data_size = sizeof(*ents) * nents;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

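/*
 * Callback for VIRTIO_GPU_CMD_GET_DISPLAY_INFO: copy the per-scanout
 * modes reported by the host into vgdev->outputs and generate a hotplug
 * event.
 */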
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_resp_display_info *resp =
                (struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
        int i;

        spin_lock(&vgdev->display_info_lock);
        for (i = 0; i < vgdev->num_scanouts; i++) {
                vgdev->outputs[i].info = resp->pmodes[i];
                if (resp->pmodes[i].enabled) {
                        DRM_DEBUG("output %d: %dx%d+%d+%d", i,
                                  le32_to_cpu(resp->pmodes[i].r.width),
                                  le32_to_cpu(resp->pmodes[i].r.height),
                                  le32_to_cpu(resp->pmodes[i].r.x),
                                  le32_to_cpu(resp->pmodes[i].r.y));
                } else {
                        DRM_DEBUG("output %d: disabled", i);
                }
        }

        vgdev->display_info_pending = false;
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);

        if (!drm_helper_hpd_irq_event(vgdev->ddev))
                drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
                                              struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset_info *cmd =
                (struct virtio_gpu_get_capset_info *)vbuf->buf;
        struct virtio_gpu_resp_capset_info *resp =
                (struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
        int i = le32_to_cpu(cmd->capset_index);

        spin_lock(&vgdev->display_info_lock);
        if (vgdev->capsets) {
                vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
                vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
                vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
        } else {
                DRM_ERROR("invalid capset memory.");
        }
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
                                     struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset *cmd =
                (struct virtio_gpu_get_capset *)vbuf->buf;
        struct virtio_gpu_resp_capset *resp =
                (struct virtio_gpu_resp_capset *)vbuf->resp_buf;
        struct virtio_gpu_drv_cap_cache *cache_ent;

        spin_lock(&vgdev->display_info_lock);
        list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
                if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
                    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
                        memcpy(cache_ent->caps_cache, resp->capset_data,
                               cache_ent->size);
                        /* Copy must occur before is_valid is signalled. */
                        smp_wmb();
                        atomic_set(&cache_ent->is_valid, 1);
                        break;
                }
        }
        spin_unlock(&vgdev->display_info_lock);
        wake_up_all(&vgdev->resp_wq);
}

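/*
 * Block-read helper for drm_do_get_edid(): copies one EDID block out of
 * the response buffer returned by the host.
 */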
static int virtio_get_edid_block(void *data, u8 *buf,
                                 unsigned int block, size_t len)
{
        struct virtio_gpu_resp_edid *resp = data;
        size_t start = block * EDID_LENGTH;

        if (start + len > le32_to_cpu(resp->size))
                return -1;
        memcpy(buf, resp->edid + start, len);
        return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
                                       struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_cmd_get_edid *cmd =
                (struct virtio_gpu_cmd_get_edid *)vbuf->buf;
        struct virtio_gpu_resp_edid *resp =
                (struct virtio_gpu_resp_edid *)vbuf->resp_buf;
        uint32_t scanout = le32_to_cpu(cmd->scanout);
        struct virtio_gpu_output *output;
        struct edid *new_edid, *old_edid;

        if (scanout >= vgdev->num_scanouts)
                return;
        output = vgdev->outputs + scanout;

        new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
        drm_connector_update_edid_property(&output->conn, new_edid);

        spin_lock(&vgdev->display_info_lock);
        old_edid = output->edid;
        output->edid = new_edid;
        spin_unlock(&vgdev->display_info_lock);

        kfree(old_edid);
        wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
        struct virtio_gpu_ctrl_hdr *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        vgdev->display_info_pending = true;
        cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
        struct virtio_gpu_get_capset_info *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
        cmd_p->capset_index = cpu_to_le32(idx);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}

int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
                              int idx, int version,
                              struct virtio_gpu_drv_cap_cache **cache_p)
{
        struct virtio_gpu_get_capset *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        int max_size;
        struct virtio_gpu_drv_cap_cache *cache_ent;
        struct virtio_gpu_drv_cap_cache *search_ent;
        void *resp_buf;

        *cache_p = NULL;

        if (idx >= vgdev->num_capsets)
                return -EINVAL;

        if (version > vgdev->capsets[idx].max_version)
                return -EINVAL;

        cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
        if (!cache_ent)
                return -ENOMEM;

        max_size = vgdev->capsets[idx].max_size;
        cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
        if (!cache_ent->caps_cache) {
                kfree(cache_ent);
                return -ENOMEM;
        }

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
                           GFP_KERNEL);
        if (!resp_buf) {
                kfree(cache_ent->caps_cache);
                kfree(cache_ent);
                return -ENOMEM;
        }

        cache_ent->version = version;
        cache_ent->id = vgdev->capsets[idx].id;
        atomic_set(&cache_ent->is_valid, 0);
        cache_ent->size = max_size;
        spin_lock(&vgdev->display_info_lock);
        /* Search while under lock in case it was added by another task. */
        list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
                if (search_ent->id == vgdev->capsets[idx].id &&
                    search_ent->version == version) {
                        *cache_p = search_ent;
                        break;
                }
        }
        if (!*cache_p)
                list_add_tail(&cache_ent->head, &vgdev->cap_cache);
        spin_unlock(&vgdev->display_info_lock);

        if (*cache_p) {
                /* Entry was found, so free everything that was just created. */
                kfree(resp_buf);
                kfree(cache_ent->caps_cache);
                kfree(cache_ent);
                return 0;
        }

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
                 sizeof(struct virtio_gpu_resp_capset) + max_size,
                 resp_buf);
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
        cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
        cmd_p->capset_version = cpu_to_le32(version);
        *cache_p = cache_ent;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

        return 0;
}

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
        struct virtio_gpu_cmd_get_edid *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;
        int scanout;

        if (WARN_ON(!vgdev->has_edid))
                return -EINVAL;

        for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
                resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
                                   GFP_KERNEL);
                if (!resp_buf)
                        return -ENOMEM;

                cmd_p = virtio_gpu_alloc_cmd_resp
                        (vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
                         sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
                         resp_buf);
                cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
                cmd_p->scanout = cpu_to_le32(scanout);
                virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        }

        return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
                                   uint32_t context_init, uint32_t nlen,
                                   const char *name)
{
        struct virtio_gpu_ctx_create *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        cmd_p->nlen = cpu_to_le32(nlen);
        cmd_p->context_init = cpu_to_le32(context_init);
        strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
        cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
                                    uint32_t id)
{
        struct virtio_gpu_ctx_destroy *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            struct virtio_gpu_object_array *objs)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            struct virtio_gpu_object_array *objs)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_object *bo,
                                  struct virtio_gpu_object_params *params,
                                  struct virtio_gpu_object_array *objs,
                                  struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_resource_create_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->format = cpu_to_le32(params->format);
        cmd_p->width = cpu_to_le32(params->width);
        cmd_p->height = cpu_to_le32(params->height);

        cmd_p->target = cpu_to_le32(params->target);
        cmd_p->bind = cpu_to_le32(params->bind);
        cmd_p->depth = cpu_to_le32(params->depth);
        cmd_p->array_size = cpu_to_le32(params->array_size);
        cmd_p->last_level = cpu_to_le32(params->last_level);
        cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
        cmd_p->flags = cpu_to_le32(params->flags);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);

        bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
                                        uint32_t ctx_id,
                                        uint64_t offset, uint32_t level,
                                        uint32_t stride,
                                        uint32_t layer_stride,
                                        struct drm_virtgpu_3d_box *box,
                                        struct virtio_gpu_object_array *objs,
                                        struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);

        if (virtio_gpu_is_shmem(bo) && use_dma_api) {
                struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

                dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
                                            shmem->pages, DMA_TO_DEVICE);
        }

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        convert_to_hw_box(&cmd_p->box, box);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);
        cmd_p->stride = cpu_to_le32(stride);
        cmd_p->layer_stride = cpu_to_le32(layer_stride);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
                                          uint32_t ctx_id,
                                          uint64_t offset, uint32_t level,
                                          uint32_t stride,
                                          uint32_t layer_stride,
                                          struct drm_virtgpu_3d_box *box,
                                          struct virtio_gpu_object_array *objs,
                                          struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        convert_to_hw_box(&cmd_p->box, box);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);
        cmd_p->stride = cpu_to_le32(stride);
        cmd_p->layer_stride = cpu_to_le32(layer_stride);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
                           void *data, uint32_t data_size,
                           uint32_t ctx_id,
                           struct virtio_gpu_object_array *objs,
                           struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_cmd_submit *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->data_buf = data;
        vbuf->data_size = data_size;
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->size = cpu_to_le32(data_size);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
                              struct virtio_gpu_object *obj,
                              struct virtio_gpu_mem_entry *ents,
                              unsigned int nents)
{
        virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
                                               ents, nents, NULL);
}

void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
                            struct virtio_gpu_output *output)
{
        struct virtio_gpu_vbuffer *vbuf;
        struct virtio_gpu_update_cursor *cur_p;

        output->cursor.pos.scanout_id = cpu_to_le32(output->index);
        cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
        memcpy(cur_p, &output->cursor, sizeof(output->cursor));
        virtio_gpu_queue_cursor(vgdev, vbuf);
}

static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
                                            struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_object *obj =
                gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
        struct virtio_gpu_resp_resource_uuid *resp =
                (struct virtio_gpu_resp_resource_uuid *)vbuf->resp_buf;
        uint32_t resp_type = le32_to_cpu(resp->hdr.type);

        spin_lock(&vgdev->resource_export_lock);
        WARN_ON(obj->uuid_state != STATE_INITIALIZING);

        if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
            obj->uuid_state == STATE_INITIALIZING) {
                import_uuid(&obj->uuid, resp->uuid);
                obj->uuid_state = STATE_OK;
        } else {
                obj->uuid_state = STATE_ERR;
        }
        spin_unlock(&vgdev->resource_export_lock);

        wake_up_all(&vgdev->resp_wq);
}

int
virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_object_array *objs)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_resource_assign_uuid *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        struct virtio_gpu_resp_resource_uuid *resp_buf;

        resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
        if (!resp_buf) {
                spin_lock(&vgdev->resource_export_lock);
                bo->uuid_state = STATE_ERR;
                spin_unlock(&vgdev->resource_export_lock);
                virtio_gpu_array_put_free(objs);
                return -ENOMEM;
        }

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, virtio_gpu_cmd_resource_uuid_cb, &vbuf, sizeof(*cmd_p),
                 sizeof(struct virtio_gpu_resp_resource_uuid), resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

        vbuf->objs = objs;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}

static void virtio_gpu_cmd_resource_map_cb(struct virtio_gpu_device *vgdev,
                                           struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_object *bo =
                gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
        struct virtio_gpu_resp_map_info *resp =
                (struct virtio_gpu_resp_map_info *)vbuf->resp_buf;
        struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
        uint32_t resp_type = le32_to_cpu(resp->hdr.type);

        spin_lock(&vgdev->host_visible_lock);

        if (resp_type == VIRTIO_GPU_RESP_OK_MAP_INFO) {
                vram->map_info = resp->map_info;
                vram->map_state = STATE_OK;
        } else {
                vram->map_state = STATE_ERR;
        }

        spin_unlock(&vgdev->host_visible_lock);
        wake_up_all(&vgdev->resp_wq);
}

int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
                       struct virtio_gpu_object_array *objs, uint64_t offset)
{
        struct virtio_gpu_resource_map_blob *cmd_p;
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_vbuffer *vbuf;
        struct virtio_gpu_resp_map_info *resp_buf;

        resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, virtio_gpu_cmd_resource_map_cb, &vbuf, sizeof(*cmd_p),
                 sizeof(struct virtio_gpu_resp_map_info), resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->offset = cpu_to_le64(offset);
        vbuf->objs = objs;

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}

void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
                          struct virtio_gpu_object *bo)
{
        struct virtio_gpu_resource_unmap_blob *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_object *bo,
                                    struct virtio_gpu_object_params *params,
                                    struct virtio_gpu_mem_entry *ents,
                                    uint32_t nents)
{
        struct virtio_gpu_resource_create_blob *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB);
        cmd_p->hdr.ctx_id = cpu_to_le32(params->ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->blob_mem = cpu_to_le32(params->blob_mem);
        cmd_p->blob_flags = cpu_to_le32(params->blob_flags);
        cmd_p->blob_id = cpu_to_le64(params->blob_id);
        cmd_p->size = cpu_to_le64(params->size);
        cmd_p->nr_entries = cpu_to_le32(nents);

        vbuf->data_buf = ents;
        vbuf->data_size = sizeof(*ents) * nents;

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        bo->created = true;
}

void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
                                     uint32_t scanout_id,
                                     struct virtio_gpu_object *bo,
                                     struct drm_framebuffer *fb,
                                     uint32_t width, uint32_t height,
                                     uint32_t x, uint32_t y)
{
        uint32_t i;
        struct virtio_gpu_set_scanout_blob *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        uint32_t format = virtio_gpu_translate_format(fb->format->format);

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT_BLOB);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->scanout_id = cpu_to_le32(scanout_id);

        cmd_p->format = cpu_to_le32(format);
        cmd_p->width = cpu_to_le32(fb->width);
        cmd_p->height = cpu_to_le32(fb->height);

        for (i = 0; i < 4; i++) {
                cmd_p->strides[i] = cpu_to_le32(fb->pitches[i]);
                cmd_p->offsets[i] = cpu_to_le32(fb->offsets[i]);
        }

        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}