/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include <drm/virtgpu_drm.h>
#include "ttm/ttm_execbuf_util.h"

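/* Convert a box from the userspace ioctl layout to the little-endian
 * layout the host expects on the virtio wire. */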
static void convert_to_hw_box(struct virtio_gpu_box *dst,
                              const struct drm_virtgpu_3d_box *src)
{
        dst->x = cpu_to_le32(src->x);
        dst->y = cpu_to_le32(src->y);
        dst->z = cpu_to_le32(src->z);
        dst->w = cpu_to_le32(src->w);
        dst->h = cpu_to_le32(src->h);
        dst->d = cpu_to_le32(src->d);
}

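/* VIRTGPU_MAP: return the fake mmap offset for a gem object, so that
 * userspace can mmap() the backing pages through the DRM fd. */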
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_map *virtio_gpu_map = data;

        return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
                                         virtio_gpu_map->handle,
                                         &virtio_gpu_map->offset);
}

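/* Reserve every buffer on @head under @ticket, then validate each one
 * against its placement.  On any failure the whole reservation is backed
 * off, so callers never see a partially reserved list. */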
static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
                                           struct list_head *head)
{
        struct ttm_validate_buffer *buf;
        struct ttm_buffer_object *bo;
        struct virtio_gpu_object *qobj;
        int ret;

        ret = ttm_eu_reserve_buffers(ticket, head, true, NULL);
        if (ret != 0)
                return ret;

        list_for_each_entry(buf, head, head) {
                bo = buf->bo;
                qobj = container_of(bo, struct virtio_gpu_object, tbo);
                ret = ttm_bo_validate(bo, &qobj->placement, false, false);
                if (ret) {
                        ttm_eu_backoff_reservation(ticket, head);
                        return ret;
                }
        }
        return 0;
}

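/* Drop the GEM references that were taken when the validate list was
 * built. */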
static void virtio_gpu_unref_list(struct list_head *head)
{
        struct ttm_validate_buffer *buf;
        struct ttm_buffer_object *bo;
        struct virtio_gpu_object *qobj;

        list_for_each_entry(buf, head, head) {
                bo = buf->bo;
                qobj = container_of(bo, struct virtio_gpu_object, tbo);
                drm_gem_object_unreference_unlocked(&qobj->gem_base);
        }
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the
 * initial VIRTIO_GPUReleaseInfo struct (first XXX bytes).
 */
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
                                       struct drm_file *drm_file)
{
        struct drm_virtgpu_execbuffer *exbuf = data;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
        struct drm_gem_object *gobj;
        struct virtio_gpu_fence *fence;
        struct virtio_gpu_object *qobj;
        int ret;
        uint32_t *bo_handles = NULL;
        void __user *user_bo_handles = NULL;
        struct list_head validate_list;
        struct ttm_validate_buffer *buflist = NULL;
        int i;
        struct ww_acquire_ctx ticket;
        void *buf;

        if (vgdev->has_virgl_3d == false)
                return -ENOSYS;

        INIT_LIST_HEAD(&validate_list);
        if (exbuf->num_bo_handles) {
                bo_handles = drm_malloc_ab(exbuf->num_bo_handles,
                                           sizeof(uint32_t));
                buflist = drm_calloc_large(exbuf->num_bo_handles,
                                           sizeof(struct ttm_validate_buffer));
                if (!bo_handles || !buflist) {
                        drm_free_large(bo_handles);
                        drm_free_large(buflist);
                        return -ENOMEM;
                }

                user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles;
                if (copy_from_user(bo_handles, user_bo_handles,
                                   exbuf->num_bo_handles * sizeof(uint32_t))) {
                        drm_free_large(bo_handles);
                        drm_free_large(buflist);
                        return -EFAULT;
                }

                for (i = 0; i < exbuf->num_bo_handles; i++) {
                        gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
                        if (!gobj) {
                                drm_free_large(bo_handles);
                                drm_free_large(buflist);
                                return -ENOENT;
                        }

                        qobj = gem_to_virtio_gpu_obj(gobj);
                        buflist[i].bo = &qobj->tbo;
                        list_add(&buflist[i].head, &validate_list);
                }
                drm_free_large(bo_handles);
        }

        ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
        if (ret)
                goto out_free;

        buf = memdup_user((void __user *)(uintptr_t)exbuf->command,
                          exbuf->size);
        if (IS_ERR(buf)) {
                ret = PTR_ERR(buf);
                goto out_unresv;
        }
        virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
                              vfpriv->ctx_id, &fence);

        ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);

        /* fence the command bo */
        virtio_gpu_unref_list(&validate_list);
        drm_free_large(buflist);
        fence_put(&fence->f);
        return 0;

out_unresv:
        ttm_eu_backoff_reservation(&ticket, &validate_list);
out_free:
        virtio_gpu_unref_list(&validate_list);
        drm_free_large(buflist);
        return ret;
}

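/* VIRTGPU_GETPARAM: report device capabilities to userspace; currently
 * only whether the host supports virgl 3D commands. */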
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file_priv)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_getparam *param = data;
        int value;

        switch (param->param) {
        case VIRTGPU_PARAM_3D_FEATURES:
                value = vgdev->has_virgl_3d == true ? 1 : 0;
                break;
        default:
                return -EINVAL;
        }
        if (copy_to_user((void __user *)(unsigned long)param->value,
                         &value, sizeof(int)))
                return -EFAULT;
        return 0;
}

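/* VIRTGPU_RESOURCE_CREATE: allocate a gem object backed by a host
 * resource.  Without virgl only plain 2D resources are accepted; with
 * virgl the full 3D parameter set is forwarded to the host and the
 * backing pages are attached under a fence. */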
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
                                            struct drm_file *file_priv)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_resource_create *rc = data;
        int ret;
        uint32_t res_id;
        struct virtio_gpu_object *qobj;
        struct drm_gem_object *obj;
        uint32_t handle = 0;
        uint32_t size;
        struct list_head validate_list;
        struct ttm_validate_buffer mainbuf;
        struct virtio_gpu_fence *fence = NULL;
        struct ww_acquire_ctx ticket;
        struct virtio_gpu_resource_create_3d rc_3d;

        if (vgdev->has_virgl_3d == false) {
                if (rc->depth > 1)
                        return -EINVAL;
                if (rc->nr_samples > 1)
                        return -EINVAL;
                if (rc->last_level > 1)
                        return -EINVAL;
                if (rc->target != 2)
                        return -EINVAL;
                if (rc->array_size > 1)
                        return -EINVAL;
        }

        INIT_LIST_HEAD(&validate_list);
        memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));

        virtio_gpu_resource_id_get(vgdev, &res_id);

        size = rc->size;

        /* allocate a single page size object */
        if (size == 0)
                size = PAGE_SIZE;

        qobj = virtio_gpu_alloc_object(dev, size, false, false);
        if (IS_ERR(qobj)) {
                ret = PTR_ERR(qobj);
                goto fail_id;
        }
        obj = &qobj->gem_base;

        if (!vgdev->has_virgl_3d) {
                virtio_gpu_cmd_create_resource(vgdev, res_id, rc->format,
                                               rc->width, rc->height);

                ret = virtio_gpu_object_attach(vgdev, qobj, res_id, NULL);
        } else {
                /* use a gem reference since unref list undoes them */
                drm_gem_object_reference(&qobj->gem_base);
                mainbuf.bo = &qobj->tbo;
                list_add(&mainbuf.head, &validate_list);

                ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
                if (ret) {
                        DRM_DEBUG("failed to validate\n");
                        goto fail_unref;
                }

                rc_3d.resource_id = cpu_to_le32(res_id);
                rc_3d.target = cpu_to_le32(rc->target);
                rc_3d.format = cpu_to_le32(rc->format);
                rc_3d.bind = cpu_to_le32(rc->bind);
                rc_3d.width = cpu_to_le32(rc->width);
                rc_3d.height = cpu_to_le32(rc->height);
                rc_3d.depth = cpu_to_le32(rc->depth);
                rc_3d.array_size = cpu_to_le32(rc->array_size);
                rc_3d.last_level = cpu_to_le32(rc->last_level);
                rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
                rc_3d.flags = cpu_to_le32(rc->flags);

                virtio_gpu_cmd_resource_create_3d(vgdev, &rc_3d, NULL);
                ret = virtio_gpu_object_attach(vgdev, qobj, res_id, &fence);
                if (ret) {
                        ttm_eu_backoff_reservation(&ticket, &validate_list);
                        goto fail_unref;
                }
                ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
        }

        qobj->hw_res_handle = res_id;

        ret = drm_gem_handle_create(file_priv, obj, &handle);
        if (ret) {
                drm_gem_object_release(obj);
                if (vgdev->has_virgl_3d) {
                        virtio_gpu_unref_list(&validate_list);
                        fence_put(&fence->f);
                }
                return ret;
        }
        drm_gem_object_unreference_unlocked(obj);

        rc->res_handle = res_id; /* similar to a VM address */
        rc->bo_handle = handle;

        if (vgdev->has_virgl_3d) {
                virtio_gpu_unref_list(&validate_list);
                fence_put(&fence->f);
        }
        return 0;

fail_unref:
        if (vgdev->has_virgl_3d) {
                virtio_gpu_unref_list(&validate_list);
                fence_put(&fence->f);
        }
fail_id:
        virtio_gpu_resource_id_put(vgdev, res_id);
        return ret;
}

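/* VIRTGPU_RESOURCE_INFO: look up a bo handle and return its size and
 * host resource handle. */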
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
                                          struct drm_file *file_priv)
{
        struct drm_virtgpu_resource_info *ri = data;
        struct drm_gem_object *gobj = NULL;
        struct virtio_gpu_object *qobj = NULL;

        gobj = drm_gem_object_lookup(file_priv, ri->bo_handle);
        if (gobj == NULL)
                return -ENOENT;

        qobj = gem_to_virtio_gpu_obj(gobj);

        ri->size = qobj->gem_base.size;
        ri->res_handle = qobj->hw_res_handle;
        drm_gem_object_unreference_unlocked(gobj);
        return 0;
}

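/* VIRTGPU_TRANSFER_FROM_HOST: copy a box from the host resource into
 * the guest bo (virgl only), fencing the bo until the transfer has
 * completed. */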
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
                                               void *data,
                                               struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct drm_virtgpu_3d_transfer_from_host *args = data;
        struct drm_gem_object *gobj = NULL;
        struct virtio_gpu_object *qobj = NULL;
        struct virtio_gpu_fence *fence;
        int ret;
        u32 offset = args->offset;
        struct virtio_gpu_box box;

        if (vgdev->has_virgl_3d == false)
                return -ENOSYS;

        gobj = drm_gem_object_lookup(file, args->bo_handle);
        if (gobj == NULL)
                return -ENOENT;

        qobj = gem_to_virtio_gpu_obj(gobj);

        ret = virtio_gpu_object_reserve(qobj, false);
        if (ret)
                goto out;

        ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
                              true, false);
        if (unlikely(ret))
                goto out_unres;

        convert_to_hw_box(&box, &args->box);
        virtio_gpu_cmd_transfer_from_host_3d
                (vgdev, qobj->hw_res_handle,
                 vfpriv->ctx_id, offset, args->level,
                 &box, &fence);
        reservation_object_add_excl_fence(qobj->tbo.resv,
                                          &fence->f);
        fence_put(&fence->f);
out_unres:
        virtio_gpu_object_unreserve(qobj);
out:
        drm_gem_object_unreference_unlocked(gobj);
        return ret;
}

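/* VIRTGPU_TRANSFER_TO_HOST: copy a box from the guest bo to the host
 * resource.  2D hosts get an unfenced x/y/w/h copy; virgl hosts get the
 * full 3D box plus an exclusive fence on the bo. */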
static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
                                             struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct drm_virtgpu_3d_transfer_to_host *args = data;
        struct drm_gem_object *gobj = NULL;
        struct virtio_gpu_object *qobj = NULL;
        struct virtio_gpu_fence *fence;
        struct virtio_gpu_box box;
        int ret;
        u32 offset = args->offset;

        gobj = drm_gem_object_lookup(file, args->bo_handle);
        if (gobj == NULL)
                return -ENOENT;

        qobj = gem_to_virtio_gpu_obj(gobj);

        ret = virtio_gpu_object_reserve(qobj, false);
        if (ret)
                goto out;

        ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
                              true, false);
        if (unlikely(ret))
                goto out_unres;

        convert_to_hw_box(&box, &args->box);
        if (!vgdev->has_virgl_3d) {
                virtio_gpu_cmd_transfer_to_host_2d
                        (vgdev, qobj->hw_res_handle, offset,
                         box.w, box.h, box.x, box.y, NULL);
        } else {
                virtio_gpu_cmd_transfer_to_host_3d
                        (vgdev, qobj->hw_res_handle,
                         vfpriv ? vfpriv->ctx_id : 0, offset,
                         args->level, &box, &fence);
                reservation_object_add_excl_fence(qobj->tbo.resv,
                                                  &fence->f);
                fence_put(&fence->f);
        }

out_unres:
        virtio_gpu_object_unreserve(qobj);
out:
        drm_gem_object_unreference_unlocked(gobj);
        return ret;
}

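/* VIRTGPU_WAIT: block (or just poll, with VIRTGPU_WAIT_NOWAIT) until
 * the host is done with the bo. */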
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file)
{
        struct drm_virtgpu_3d_wait *args = data;
        struct drm_gem_object *gobj = NULL;
        struct virtio_gpu_object *qobj = NULL;
        int ret;
        bool nowait = false;

        gobj = drm_gem_object_lookup(file, args->handle);
        if (gobj == NULL)
                return -ENOENT;

        qobj = gem_to_virtio_gpu_obj(gobj);
        if (args->flags & VIRTGPU_WAIT_NOWAIT)
                nowait = true;
        ret = virtio_gpu_object_wait(qobj, nowait);

        drm_gem_object_unreference_unlocked(gobj);
        return ret;
}

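/* VIRTGPU_GET_CAPS: copy a capability set to userspace, serving it from
 * the driver cache when possible and otherwise fetching it from the
 * host, waiting up to 5 seconds for the response. */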
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
                                     void *data, struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_get_caps *args = data;
        int size;
        int i;
        int found_valid = -1;
        int ret;
        struct virtio_gpu_drv_cap_cache *cache_ent;
        void *ptr;

        if (vgdev->num_capsets == 0)
                return -ENOSYS;

        spin_lock(&vgdev->display_info_lock);
        for (i = 0; i < vgdev->num_capsets; i++) {
                if (vgdev->capsets[i].id == args->cap_set_id) {
                        if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
                                found_valid = i;
                                break;
                        }
                }
        }

        if (found_valid == -1) {
                spin_unlock(&vgdev->display_info_lock);
                return -EINVAL;
        }

        size = vgdev->capsets[found_valid].max_size;
        if (args->size > size) {
                spin_unlock(&vgdev->display_info_lock);
                return -EINVAL;
        }

        list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
                if (cache_ent->id == args->cap_set_id &&
                    cache_ent->version == args->cap_set_ver) {
                        ptr = cache_ent->caps_cache;
                        spin_unlock(&vgdev->display_info_lock);
                        goto copy_exit;
                }
        }
        spin_unlock(&vgdev->display_info_lock);

        /* not in cache - need to talk to hw */
        virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
                                  &cache_ent);

        /* note: the timeout result is not checked; a timed-out entry is
         * copied out as-is */
        ret = wait_event_timeout(vgdev->resp_wq,
                                 atomic_read(&cache_ent->is_valid), 5 * HZ);
        ptr = cache_ent->caps_cache;

copy_exit:
        if (copy_to_user((void __user *)(unsigned long)args->addr, ptr, size))
                return -EFAULT;

        return 0;
}

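/* The ioctl table: every entry is unlocked and available to render
 * nodes. */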
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
        DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
                          DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
                          DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
                          DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
                          virtio_gpu_resource_create_ioctl,
                          DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
                          DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

        /* make transfer async to the main ring? - not sure, can we
         * thread these in the underlying GL
         */
        DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
                          virtio_gpu_transfer_from_host_ioctl,
                          DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
                          virtio_gpu_transfer_to_host_ioctl,
                          DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
                          DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
                          DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
};