/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		 \
			       + MAX_INLINE_RESP_SIZE)

static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

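/*
 * Virtqueue callbacks.  These run when the host has used buffers; they
 * only kick the dequeue work items, the actual reclaim of completed
 * vbuffers happens in process context.
 */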
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}

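/*
 * Allocate a vbuffer from the slab cache.  The command always lives
 * inline right after the struct (see VBUFFER_SIZE); small responses are
 * placed inline as well, larger ones use the caller-supplied resp_buf.
 */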
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
	if (!vbuf)
		return ERR_PTR(-ENOMEM);

	BUG_ON(size > MAX_INLINE_CMD_SIZE);
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, size,
				   sizeof(struct virtio_gpu_ctrl_hdr),
				   NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kvfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

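/* Collect all buffers the host has finished with, under the queue lock. */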
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}

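/*
 * Work item for the control queue: reclaim completed vbuffers, log
 * error responses, process fences and run per-command response
 * callbacks before freeing everything.
 */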
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id = 0;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry(entry, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);

		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
			if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
				struct virtio_gpu_ctrl_hdr *cmd;
				cmd = (struct virtio_gpu_ctrl_hdr *)entry->buf;
				DRM_ERROR("response 0x%x (command 0x%x)\n",
					  le32_to_cpu(resp->type),
					  le32_to_cpu(cmd->type));
			} else
				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		}
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			u64 f = le64_to_cpu(resp->fence_id);

			if (fence_id > f) {
				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
					  __func__, fence_id, f);
			} else {
				fence_id = f;
			}
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		if (entry->objs)
			virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

/* Create sg_table from a vmalloc'd buffer. */
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
	int ret, s, i;
	struct sg_table *sgt;
	struct scatterlist *sg;
	struct page *pg;

	if (WARN_ON(!PAGE_ALIGNED(data)))
		return NULL;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	*sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
	ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
	if (ret) {
		kfree(sgt);
		return NULL;
	}

	for_each_sg(sgt->sgl, sg, *sg_ents, i) {
		pg = vmalloc_to_page(data);
		if (!pg) {
			sg_free_table(sgt);
			kfree(sgt);
			return NULL;
		}

		s = min_t(int, PAGE_SIZE, size);
		sg_set_page(sg, pg, s, 0);

		size -= s;
		data += s;
	}

	return sgt;
}

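/*
 * Add a vbuffer to the control virtqueue: one out-sg for the command,
 * an optional out-sg (or sg chain) for extra data and an optional in-sg
 * for the response.  On -ENOSPC the queue lock is dropped while waiting
 * for the host to make room, then the add is retried.
 */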
static bool virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
						struct virtio_gpu_vbuffer *vbuf,
						struct scatterlist *vout)
		__releases(&vgdev->ctrlq.qlock)
		__acquires(&vgdev->ctrlq.qlock)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	struct scatterlist *sgs[3], vcmd, vresp;
	int outcnt = 0, incnt = 0;
	bool notify = false;
	int ret;

	if (!vgdev->vqs_ready)
		return notify;

	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	sgs[outcnt + incnt] = &vcmd;
	outcnt++;

	if (vout) {
		sgs[outcnt + incnt] = vout;
		outcnt++;
	}

	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
		spin_lock(&vgdev->ctrlq.qlock);
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
			(struct virtio_gpu_ctrl_hdr *)vbuf->buf);

		notify = virtqueue_kick_prepare(vq);
	}
	return notify;
}

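/*
 * Queue a command that may carry a fence.  The fence is emitted while
 * the queue lock is held and the queue is known to have room, so fence
 * ids reach the host in order.
 */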
static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
						struct virtio_gpu_vbuffer *vbuf,
						struct virtio_gpu_ctrl_hdr *hdr,
						struct virtio_gpu_fence *fence)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	struct scatterlist *vout = NULL, sg;
	struct sg_table *sgt = NULL;
	bool notify;
	int outcnt = 0;

	if (vbuf->data_size) {
		if (is_vmalloc_addr(vbuf->data_buf)) {
			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
					     &outcnt);
			if (!sgt)
				return;
			vout = sgt->sgl;
		} else {
			sg_init_one(&sg, vbuf->data_buf, vbuf->data_size);
			vout = &sg;
			outcnt = 1;
		}
	}

again:
	spin_lock(&vgdev->ctrlq.qlock);

	/*
	 * Make sure we have enough space in the virtqueue. If not
	 * wait here until we have.
	 *
	 * Without that virtio_gpu_queue_ctrl_buffer_locked might have
	 * to wait for free space, which can result in fence ids being
	 * submitted out-of-order.
	 */
	if (vq->num_free < 2 + outcnt) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
		goto again;
	}

	if (hdr && fence) {
		virtio_gpu_fence_emit(vgdev, hdr, fence);
		if (vbuf->objs) {
			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
			virtio_gpu_array_unlock_resv(vbuf->objs);
		}
	}
	notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf, vout);
	spin_unlock(&vgdev->ctrlq.qlock);
	if (notify)
		virtqueue_notify(vgdev->ctrlq.vq);

	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
}

static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					 struct virtio_gpu_vbuffer *vbuf)
{
	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL, NULL);
}

static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	bool notify;
	int ret;
	int outcnt;

	if (!vgdev->vqs_ready)
		return;

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
			(struct virtio_gpu_ctrl_hdr *)vbuf->buf);

		notify = virtqueue_kick_prepare(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (notify)
		virtqueue_notify(vq);
}

/* just create gem objects for userspace and long lived objects,
 * just use dma_alloced pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_object_array *objs,
				    struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
	bo->created = true;
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
						  uint32_t resource_id,
						  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_detach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint64_t offset,
					uint32_t width, uint32_t height,
					uint32_t x, uint32_t y,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			/* Copy must occur before is_valid is signalled. */
			smp_wmb();
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up_all(&vgdev->resp_wq);
}

static int virtio_get_edid_block(void *data, u8 *buf,
				 unsigned int block, size_t len)
{
	struct virtio_gpu_resp_edid *resp = data;
	size_t start = block * EDID_LENGTH;

	if (start + len > le32_to_cpu(resp->size))
		return -1;
	memcpy(buf, resp->edid + start, len);
	return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_cmd_get_edid *cmd =
		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
	struct virtio_gpu_resp_edid *resp =
		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
	uint32_t scanout = le32_to_cpu(cmd->scanout);
	struct virtio_gpu_output *output;
	struct edid *new_edid, *old_edid;

	if (scanout >= vgdev->num_scanouts)
		return;
	output = vgdev->outputs + scanout;

	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
	drm_connector_update_edid_property(&output->conn, new_edid);

	spin_lock(&vgdev->display_info_lock);
	old_edid = output->edid;
	output->edid = new_edid;
	spin_unlock(&vgdev->display_info_lock);

	kfree(old_edid);
	wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	struct virtio_gpu_drv_cap_cache *search_ent;
	void *resp_buf;

	*cache_p = NULL;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	/* Search while under lock in case it was added by another task. */
	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
		if (search_ent->id == vgdev->capsets[idx].id &&
		    search_ent->version == version) {
			*cache_p = search_ent;
			break;
		}
	}
	if (!*cache_p)
		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	if (*cache_p) {
		/* Entry was found, so free everything that was just created. */
		kfree(resp_buf);
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return 0;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;

	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;

	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
				   GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;

		cmd_p = virtio_gpu_alloc_cmd_resp
			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
			 resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	}

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_object_array *objs,
				  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	cmd_p->target = cpu_to_le32(params->target);
	cmd_p->bind = cpu_to_le32(params->bind);
	cmd_p->depth = cpu_to_le32(params->depth);
	cmd_p->array_size = cpu_to_le32(params->array_size);
	cmd_p->last_level = cpu_to_le32(params->last_level);
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);

	bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct drm_virtgpu_3d_box *box,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct drm_virtgpu_3d_box *box,
					  struct virtio_gpu_object_array *objs,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id,
			   struct virtio_gpu_object_array *objs,
			   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

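/*
 * Pin the shmem pages of an object, build the mem-entry list from its
 * sg table (DMA mapped when the DMA API is in use) and attach it as
 * backing store for the host resource.
 */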
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     struct virtio_gpu_fence *fence)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
	struct virtio_gpu_mem_entry *ents;
	struct scatterlist *sg;
	int si, nents, ret;

	if (WARN_ON_ONCE(!obj->created))
		return -EINVAL;
	if (WARN_ON_ONCE(obj->pages))
		return -EINVAL;

	ret = drm_gem_shmem_pin(&obj->base.base);
	if (ret < 0)
		return -EINVAL;

	obj->pages = drm_gem_shmem_get_sg_table(&obj->base.base);
	if (obj->pages == NULL) {
		drm_gem_shmem_unpin(&obj->base.base);
		return -EINVAL;
	}

	if (use_dma_api) {
		obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
					 obj->pages->sgl, obj->pages->nents,
					 DMA_TO_DEVICE);
		nents = obj->mapped;
	} else {
		nents = obj->pages->nents;
	}

	/* gets freed when the ring has consumed it */
	ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
			     GFP_KERNEL);
	if (!ents) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	for_each_sg(obj->pages->sgl, sg, nents, si) {
		ents[si].addr = cpu_to_le64(use_dma_api
					    ? sg_dma_address(sg)
					    : sg_phys(sg));
		ents[si].length = cpu_to_le32(sg->length);
		ents[si].padding = 0;
	}

	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents, fence);

	return 0;
}

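/*
 * Detach the backing store again.  With the DMA API in use we must wait
 * for the host to stop using the pages before the IOMMU mappings are
 * torn down.
 */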
void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (WARN_ON_ONCE(!obj->pages))
		return;

	if (use_dma_api && obj->mapped) {
		struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
		/* detach backing and wait for the host to process it ... */
		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
		dma_fence_wait(&fence->f, true);
		dma_fence_put(&fence->f);

		/* ... then tear down iommu mappings */
		dma_unmap_sg(vgdev->vdev->dev.parent,
			     obj->pages->sgl, obj->mapped,
			     DMA_TO_DEVICE);
		obj->mapped = 0;
	} else {
		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
	}

	sg_free_table(obj->pages);
	obj->pages = NULL;

	drm_gem_shmem_unpin(&obj->base.base);
}

void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}