3 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
5 * Stanislav Vorobiov <s.vorobiov@samsung.com>
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
22 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
23 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
24 * OTHER DEALINGS IN THE SOFTWARE.
38 #include <linux/stddef.h>
41 #include <xf86atomic.h>
/*
 * Byte offset of 'member' within 'type'.
 *
 * Implemented with the standard offsetof() from <stddef.h> (already
 * included above): the previous hand-rolled form
 * ((size_t)&((type*)0)->member) dereferences a null pointer, which is
 * undefined behavior in C even though it "works" on most compilers.
 */
#define vigs_offsetof(type, member) offsetof(type, member)
/*
 * Recover a pointer to the enclosing 'type' from a pointer 'ptr' to its
 * 'member' field — the classic container_of() pattern. Used below to map
 * the public structs (vigs_drm_gem, vigs_drm_fence) back to their private
 * *_impl wrappers.
 */
48 #define vigs_containerof(ptr, type, member) ((type*)((char*)(ptr) - vigs_offsetof(type, member)))
/*
 * Private implementation wrappers around the public vigs_drm_* objects.
 *
 * Each *_impl struct embeds the public struct handed out to callers; the
 * wrapper is recovered with vigs_containerof() (see vigs_drm_gem_ref/unref).
 * vigs_drm_gem_info carries shared bookkeeping — at least the atomic
 * ref_count initialized in vigs_drm_gem_impl_init().
 *
 * NOTE(review): interior lines of these definitions are missing from this
 * view; the field lists shown here are partial — confirm against the full
 * source before relying on exact layout.
 */
50 struct vigs_drm_gem_info
55 struct vigs_drm_gem_impl
/* Shared GEM bookkeeping; appears first in every GEM-backed wrapper, which
 * presumably makes the (struct vigs_drm_gem_impl*) casts below valid —
 * TODO confirm layout in full source. */
57 struct vigs_drm_gem_info info;
59 struct vigs_drm_gem gem;
62 struct vigs_drm_surface_impl
64 struct vigs_drm_gem_info gem_info;
66 struct vigs_drm_surface base;
69 struct vigs_drm_execbuffer_impl
71 struct vigs_drm_gem_info gem_info;
73 struct vigs_drm_execbuffer base;
76 struct vigs_drm_fence_impl
78 struct vigs_drm_fence base;
83 static void vigs_drm_gem_close(struct vigs_drm_device *dev, uint32_t handle)
85 struct drm_gem_close req =
91 drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
/*
 * Common initialization for every GEM-backed wrapper (surface, execbuffer):
 * the wrapper starts life with exactly one reference, and the kernel
 * handle, size and flink name are recorded on the embedded public struct.
 *
 * NOTE(review): the tail of the parameter list (original lines 97-100) is
 * missing from this view; 'size', 'handle' and 'name' are presumably
 * trailing parameters — confirm against the full source.
 */
95 static void vigs_drm_gem_impl_init(struct vigs_drm_gem_impl *gem_impl,
96                                    struct vigs_drm_device *dev,
101     atomic_set(&gem_impl->info.ref_count, 1);
102     gem_impl->gem.dev = dev;
103     gem_impl->gem.size = size;
104     gem_impl->gem.handle = handle;
105     gem_impl->gem.name = name;
/*
 * Allocate a vigs_drm_device for an already-open DRM fd and verify that the
 * kernel driver's major version matches what this library was built for.
 *
 * NOTE(review): error-handling lines are missing from this view (calloc and
 * drmGetVersion failure paths, and the body of the version-mismatch branch);
 * presumably they free *dev and return a negative errno — confirm in the
 * full source.
 */
108 int vigs_drm_device_create(int fd, struct vigs_drm_device **dev)
110     drmVersionPtr version;
/* calloc(size, 1) — arguments are in (size, nmemb) order rather than the
 * conventional calloc(1, size); harmless, but worth normalizing. */
114     *dev = calloc(sizeof(**dev), 1);
121     version = drmGetVersion(fd);
128     major = version->version_major;
130     drmFreeVersion(version);
/* Exact major-version match is required; anything else is rejected. */
132     if (major != DRM_VIGS_DRIVER_VERSION) {
/*
 * Destroy a device created by vigs_drm_device_create().
 * (The body is not visible in this view; presumably it frees 'dev'.)
 */
149 void vigs_drm_device_destroy(struct vigs_drm_device *dev)
/*
 * Query the kernel driver's VIGS protocol version.
 *
 * 'protocol_version' may be NULL if the caller only wants to probe that the
 * ioctl works. Returns 0 on success; the failure path (lines hidden in this
 * view) presumably returns -errno.
 */
154 int vigs_drm_device_get_protocol_version(struct vigs_drm_device *dev,
155                                          uint32_t *protocol_version)
157     struct drm_vigs_get_protocol_version req;
160     ret = drmIoctl(dev->fd, DRM_IOCTL_VIGS_GET_PROTOCOL_VERSION, &req);
166     if (protocol_version) {
167         *protocol_version = req.version;
/*
 * Take an additional reference on a GEM object. Pairs with
 * vigs_drm_gem_unref().
 *
 * NOTE(review): lines between the local declaration and the containerof
 * call are hidden in this view — presumably a NULL guard; confirm.
 */
173 void vigs_drm_gem_ref(struct vigs_drm_gem *gem)
175     struct vigs_drm_gem_impl *gem_impl;
181     gem_impl = vigs_containerof(gem, struct vigs_drm_gem_impl, gem);
183     atomic_inc(&gem_impl->info.ref_count);
/*
 * Drop a reference on a GEM object. When the last reference goes away the
 * CPU mapping is torn down and the kernel handle is closed.
 *
 * NOTE(review): guard lines are hidden in this view (NULL check, early
 * return after atomic_dec_and_test, and presumably an 'if (gem->vaddr)'
 * around the munmap) — confirm against the full source.
 */
186 void vigs_drm_gem_unref(struct vigs_drm_gem *gem)
188     struct vigs_drm_gem_impl *gem_impl;
194     gem_impl = vigs_containerof(gem, struct vigs_drm_gem_impl, gem);
/* Unbalanced unref is a programming error, not a runtime condition. */
196     assert(atomic_read(&gem_impl->info.ref_count) > 0);
197     if (!atomic_dec_and_test(&gem_impl->info.ref_count)) {
202     munmap(gem->vaddr, gem->size);
205     vigs_drm_gem_close(gem->dev, gem->handle);
/*
 * Obtain a global flink name for the GEM object (DRM_IOCTL_GEM_FLINK) and
 * cache it in gem->name, so the object can be opened by other processes
 * via vigs_drm_surface_open()/vigs_drm_execbuffer_open().
 *
 * NOTE(review): hidden lines presumably include an early return when
 * gem->name is already set and the -errno failure path — confirm.
 */
210 int vigs_drm_gem_get_name(struct vigs_drm_gem *gem)
212     struct drm_gem_flink req =
214         .handle = gem->handle,
222     ret = drmIoctl(gem->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
228     gem->name = req.name;
/*
 * Map the GEM object into this process and store the CPU pointer in
 * gem->vaddr. 'track_access' is forwarded to the kernel as-is — per the
 * ioctl name it presumably enables access tracking for dirty detection;
 * confirm in the kernel driver.
 *
 * NOTE(review): hidden lines presumably include an early return when
 * gem->vaddr is already set and the -errno failure path — confirm.
 */
233 int vigs_drm_gem_map(struct vigs_drm_gem *gem, int track_access)
235     struct drm_vigs_gem_map req =
237         .handle = gem->handle,
238         .track_access = track_access
246     ret = drmIoctl(gem->dev->fd, DRM_IOCTL_VIGS_GEM_MAP, &req);
/* Kernel returns the mapping as an integer address; cast back to a pointer. */
252     gem->vaddr = (void*)req.address;
/*
 * Undo vigs_drm_gem_map(): remove the CPU mapping of the GEM object.
 *
 * NOTE(review): hidden lines presumably guard against gem->vaddr being
 * NULL and reset it after the munmap — confirm against the full source.
 */
257 void vigs_drm_gem_unmap(struct vigs_drm_gem *gem)
263     munmap(gem->vaddr, gem->size);
/*
 * Block until the kernel reports the GEM object idle
 * (DRM_IOCTL_VIGS_GEM_WAIT).
 *
 * NOTE(review): the return path is hidden in this view — by analogy with
 * the other wrappers here it presumably returns (ret != 0) ? -errno : 0.
 */
267 int vigs_drm_gem_wait(struct vigs_drm_gem *gem)
269     struct drm_vigs_gem_wait req =
271         .handle = gem->handle,
275     ret = drmIoctl(gem->dev->fd, DRM_IOCTL_VIGS_GEM_WAIT, &req);
/*
 * Create a new VIGS surface (width/height/stride/format/scanout — parameter
 * lines 285-289 are hidden in this view) and return it with one reference
 * through '*sfc'.
 *
 * On success the surface's GEM bookkeeping is initialized via
 * vigs_drm_gem_impl_init() and the kernel-assigned id is recorded.
 * NOTE(review): the calloc/ioctl failure paths are hidden — presumably they
 * free sfc_impl and return -errno; confirm in the full source.
 */
284 int vigs_drm_surface_create(struct vigs_drm_device *dev,
290                             struct vigs_drm_surface **sfc)
292     struct vigs_drm_surface_impl *sfc_impl;
293     struct drm_vigs_create_surface req =
303     sfc_impl = calloc(sizeof(*sfc_impl), 1);
310     ret = drmIoctl(dev->fd, DRM_IOCTL_VIGS_CREATE_SURFACE, &req);
/* Valid because vigs_drm_surface_impl leads with vigs_drm_gem_info. */
317     vigs_drm_gem_impl_init((struct vigs_drm_gem_impl*)sfc_impl,
323     sfc_impl->base.width = width;
324     sfc_impl->base.height = height;
325     sfc_impl->base.stride = stride;
326     sfc_impl->base.format = format;
327     sfc_impl->base.scanout = scanout;
328     sfc_impl->base.id = req.id;
330     *sfc = &sfc_impl->base;
/*
 * Open an existing surface by its global flink name (parameter line 343 is
 * hidden in this view): DRM_IOCTL_GEM_OPEN resolves the name to a local
 * handle, then DRM_IOCTL_VIGS_SURFACE_INFO fetches the surface metadata
 * used to fill in the public struct.
 *
 * On failure after the GEM open succeeds, the freshly obtained handle is
 * closed again (line 395) so no kernel object leaks.
 * NOTE(review): the error-branch control flow between these lines is
 * hidden — confirm the cleanup ordering in the full source.
 */
342 int vigs_drm_surface_open(struct vigs_drm_device *dev,
344                           struct vigs_drm_surface **sfc)
346     struct vigs_drm_surface_impl *sfc_impl;
347     struct drm_gem_open req =
351     struct drm_vigs_surface_info info_req;
354     sfc_impl = calloc(sizeof(*sfc_impl), 1);
361     ret = drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req);
368     info_req.handle = req.handle;
370     ret = drmIoctl(dev->fd, DRM_IOCTL_VIGS_SURFACE_INFO, &info_req);
/* Valid because vigs_drm_surface_impl leads with vigs_drm_gem_info. */
377     vigs_drm_gem_impl_init((struct vigs_drm_gem_impl*)sfc_impl,
383     sfc_impl->base.width = info_req.width;
384     sfc_impl->base.height = info_req.height;
385     sfc_impl->base.stride = info_req.stride;
386     sfc_impl->base.format = info_req.format;
387     sfc_impl->base.scanout = info_req.scanout;
388     sfc_impl->base.id = info_req.id;
390     *sfc = &sfc_impl->base;
/* Error path: don't leak the handle obtained from DRM_IOCTL_GEM_OPEN. */
395     vigs_drm_gem_close(dev, req.handle);
404 int vigs_drm_surface_set_gpu_dirty(struct vigs_drm_surface *sfc)
406 struct drm_vigs_surface_set_gpu_dirty req =
408 .handle = sfc->gem.handle
412 ret = drmIoctl(sfc->gem.dev->fd, DRM_IOCTL_VIGS_SURFACE_SET_GPU_DIRTY, &req);
414 return (ret != 0) ? -errno : 0;
/*
 * Begin a CPU access to a mapped surface. The kernel is given the mapped
 * address (sfc->gem.vaddr) so it can synchronize the CPU view.
 *
 * NOTE(review): the second parameter (original line 418) and the remaining
 * request fields (line 423) are hidden in this view — presumably an access
 * flags value forwarded into the request; confirm against drm_vigs_surface_
 * start_access in the kernel headers.
 */
417 int vigs_drm_surface_start_access(struct vigs_drm_surface *sfc,
420     struct drm_vigs_surface_start_access req =
422         .address = (unsigned long)sfc->gem.vaddr,
427     ret = drmIoctl(sfc->gem.dev->fd, DRM_IOCTL_VIGS_SURFACE_START_ACCESS, &req);
429     return (ret != 0) ? -errno : 0;
/*
 * End a CPU access started with vigs_drm_surface_start_access(), flushing
 * the CPU-side changes identified by the mapped address back to the kernel.
 *
 * NOTE(review): the second parameter (original line 433) and remaining
 * request fields (line 438) are hidden in this view — presumably a sync
 * flag forwarded into the request; confirm against the kernel headers.
 */
432 int vigs_drm_surface_end_access(struct vigs_drm_surface *sfc,
435     struct drm_vigs_surface_end_access req =
437         .address = (unsigned long)sfc->gem.vaddr,
442     ret = drmIoctl(sfc->gem.dev->fd, DRM_IOCTL_VIGS_SURFACE_END_ACCESS, &req);
444     return (ret != 0) ? -errno : 0;
/*
 * Create a new execbuffer GEM object (size parameter on hidden line 448)
 * and return it with one reference through '*execbuffer'.
 *
 * NOTE(review): the request initializer fields and the ioctl error check
 * are hidden in this view; on ioctl failure control reaches the cleanup at
 * line 483 which frees the wrapper — confirm the exact flow in the full
 * source.
 */
447 int vigs_drm_execbuffer_create(struct vigs_drm_device *dev,
449                                struct vigs_drm_execbuffer **execbuffer)
451     struct vigs_drm_execbuffer_impl *execbuffer_impl;
452     struct drm_vigs_create_execbuffer req =
458     execbuffer_impl = calloc(sizeof(*execbuffer_impl), 1);
460     if (!execbuffer_impl) {
465     ret = drmIoctl(dev->fd, DRM_IOCTL_VIGS_CREATE_EXECBUFFER, &req);
/* Valid because vigs_drm_execbuffer_impl leads with vigs_drm_gem_info. */
472     vigs_drm_gem_impl_init((struct vigs_drm_gem_impl*)execbuffer_impl,
478     *execbuffer = &execbuffer_impl->base;
/* Error path: release the wrapper allocated above. */
483     free(execbuffer_impl);
/*
 * Open an existing execbuffer by its global flink name (parameter on hidden
 * line 491) via DRM_IOCTL_GEM_OPEN and return it with one reference.
 *
 * NOTE(review): the request initializer and the ioctl error check are
 * hidden in this view; on failure control reaches the cleanup at line 526
 * which frees the wrapper — confirm the exact flow in the full source.
 */
490 int vigs_drm_execbuffer_open(struct vigs_drm_device *dev,
492                              struct vigs_drm_execbuffer **execbuffer)
494     struct vigs_drm_execbuffer_impl *execbuffer_impl;
495     struct drm_gem_open req =
501     execbuffer_impl = calloc(sizeof(*execbuffer_impl), 1);
503     if (!execbuffer_impl) {
508     ret = drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req);
/* Valid because vigs_drm_execbuffer_impl leads with vigs_drm_gem_info. */
515     vigs_drm_gem_impl_init((struct vigs_drm_gem_impl*)execbuffer_impl,
521     *execbuffer = &execbuffer_impl->base;
/* Error path: release the wrapper allocated above. */
526     free(execbuffer_impl);
533 int vigs_drm_execbuffer_exec(struct vigs_drm_execbuffer *execbuffer)
535 struct drm_vigs_exec req =
537 .handle = execbuffer->gem.handle
541 ret = drmIoctl(execbuffer->gem.dev->fd, DRM_IOCTL_VIGS_EXEC, &req);
543 return (ret != 0) ? -errno : 0;
/*
 * Create a new kernel fence (hidden parameter on line 547 — presumably a
 * 'send' flag forwarded into the request; confirm) and return it with one
 * reference through '*fence'.
 *
 * Unlike the GEM-backed objects, fences keep their ref_count directly on
 * the impl struct and are released via DRM_IOCTL_VIGS_FENCE_UNREF (see
 * vigs_drm_fence_unref) rather than GEM close.
 * NOTE(review): calloc/ioctl failure paths are hidden in this view.
 */
546 int vigs_drm_fence_create(struct vigs_drm_device *dev,
548                           struct vigs_drm_fence **fence)
550     struct vigs_drm_fence_impl *fence_impl;
551     struct drm_vigs_create_fence req =
557     fence_impl = calloc(sizeof(*fence_impl), 1);
564     ret = drmIoctl(dev->fd, DRM_IOCTL_VIGS_CREATE_FENCE, &req);
/* New fence starts with a single reference and is not yet signaled. */
571     atomic_set(&fence_impl->ref_count, 1);
572     fence_impl->base.dev = dev;
573     fence_impl->base.handle = req.handle;
574     fence_impl->base.seq = req.seq;
575     fence_impl->base.signaled = 0;
577     *fence = &fence_impl->base;
/*
 * Take an additional reference on a fence. Pairs with
 * vigs_drm_fence_unref().
 *
 * NOTE(review): lines between the local declaration and the containerof
 * call are hidden in this view — presumably a NULL guard; confirm.
 */
589 void vigs_drm_fence_ref(struct vigs_drm_fence *fence)
591     struct vigs_drm_fence_impl *fence_impl;
597     fence_impl = vigs_containerof(fence, struct vigs_drm_fence_impl, base);
599     atomic_inc(&fence_impl->ref_count);
/*
 * Drop a reference on a fence. When the last reference goes away the
 * kernel-side fence is released via DRM_IOCTL_VIGS_FENCE_UNREF (its result
 * is ignored — teardown path) and, presumably on hidden lines, the wrapper
 * is freed — confirm in the full source.
 */
602 void vigs_drm_fence_unref(struct vigs_drm_fence *fence)
604     struct vigs_drm_fence_impl *fence_impl;
605     struct drm_vigs_fence_unref req;
611     fence_impl = vigs_containerof(fence, struct vigs_drm_fence_impl, base);
/* Unbalanced unref is a programming error, not a runtime condition. */
613     assert(atomic_read(&fence_impl->ref_count) > 0);
614     if (!atomic_dec_and_test(&fence_impl->ref_count)) {
618     req.handle = fence->handle;
620     drmIoctl(fence->dev->fd, DRM_IOCTL_VIGS_FENCE_UNREF, &req);
625 int vigs_drm_fence_wait(struct vigs_drm_fence *fence)
627 struct drm_vigs_fence_wait req =
629 .handle = fence->handle
633 ret = drmIoctl(fence->dev->fd, DRM_IOCTL_VIGS_FENCE_WAIT, &req);
635 return (ret != 0) ? -errno : 0;
638 int vigs_drm_fence_check(struct vigs_drm_fence *fence)
640 struct drm_vigs_fence_signaled req =
642 .handle = fence->handle
646 if (fence->signaled) {
650 ret = drmIoctl(fence->dev->fd, DRM_IOCTL_VIGS_FENCE_SIGNALED, &req);
656 fence->signaled = req.signaled;