/*
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 *
 * Stanislav Vorobiov <s.vorobiov@samsung.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <sys/mman.h>
#include <xf86drm.h>
#include <linux/stddef.h>
#include <xf86atomic.h>
/* The project headers declaring the vigs_drm_* types and the
 * DRM_IOCTL_VIGS_* ioctls are also required here; their names are
 * elided in this copy. */
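
/*
 * vigs_offsetof/vigs_containerof mirror the usual offsetof/container_of
 * idiom: each public object (vigs_drm_gem, vigs_drm_surface, ...) is
 * embedded inside a private *_impl wrapper, and these macros recover the
 * wrapper pointer from a pointer to the embedded member.
 */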
#define vigs_offsetof(type, member) ((size_t)&((type*)0)->member)

#define vigs_containerof(ptr, type, member) ((type*)((char*)(ptr) - vigs_offsetof(type, member)))
struct vigs_drm_gem_info
{
    atomic_t ref_count;
};

struct vigs_drm_gem_impl
{
    struct vigs_drm_gem_info info;

    struct vigs_drm_gem gem;
};

struct vigs_drm_surface_impl
{
    struct vigs_drm_gem_info gem_info;

    struct vigs_drm_surface base;
};

struct vigs_drm_execbuffer_impl
{
    struct vigs_drm_gem_info gem_info;

    struct vigs_drm_execbuffer base;
};

struct vigs_drm_fence_impl
{
    atomic_t ref_count;

    struct vigs_drm_fence base;
};
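
/*
 * Every *_impl wrapper carries a reference count: GEM-backed objects keep it
 * in vigs_drm_gem_info, fences keep their own. vigs_drm_gem_close() below is
 * the common helper that releases the kernel-side GEM handle once an object
 * is torn down.
 */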
static void vigs_drm_gem_close(struct vigs_drm_device *dev, uint32_t handle)
    struct drm_gem_close req =

    drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
static void vigs_drm_gem_impl_init(struct vigs_drm_gem_impl *gem_impl,
                                   struct vigs_drm_device *dev,
    atomic_set(&gem_impl->info.ref_count, 1);
    gem_impl->gem.dev = dev;
    gem_impl->gem.size = size;
    gem_impl->gem.handle = handle;
    gem_impl->gem.name = name;
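
/*
 * vigs_drm_device_create allocates the device wrapper and checks the kernel
 * driver's major version against DRM_VIGS_DRIVER_VERSION before handing the
 * device back to the caller.
 */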
int vigs_drm_device_create(int fd, struct vigs_drm_device **dev)
    drmVersionPtr version;

    *dev = calloc(sizeof(**dev), 1);

    version = drmGetVersion(fd);

    major = version->version_major;

    drmFreeVersion(version);

    if (major != DRM_VIGS_DRIVER_VERSION) {

void vigs_drm_device_destroy(struct vigs_drm_device *dev)
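
/*
 * Queries the VIGS protocol version via DRM_IOCTL_VIGS_GET_PROTOCOL_VERSION.
 * The output pointer is optional; passing NULL only exercises the ioctl.
 */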
int vigs_drm_device_get_protocol_version(struct vigs_drm_device *dev,
                                         uint32_t *protocol_version)
    struct drm_vigs_get_protocol_version req;

    ret = drmIoctl(dev->fd, DRM_IOCTL_VIGS_GET_PROTOCOL_VERSION, &req);

    if (protocol_version) {
        *protocol_version = req.version;
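
/*
 * GEM reference counting: vigs_drm_gem_ref/unref adjust the wrapper's atomic
 * count; when it drops to zero any mapping is torn down with munmap() and
 * the handle is released via DRM_IOCTL_GEM_CLOSE.
 */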
void vigs_drm_gem_ref(struct vigs_drm_gem *gem)
    struct vigs_drm_gem_impl *gem_impl;

    gem_impl = vigs_containerof(gem, struct vigs_drm_gem_impl, gem);

    atomic_inc(&gem_impl->info.ref_count);

void vigs_drm_gem_unref(struct vigs_drm_gem *gem)
    struct vigs_drm_gem_impl *gem_impl;

    gem_impl = vigs_containerof(gem, struct vigs_drm_gem_impl, gem);

    assert(atomic_read(&gem_impl->info.ref_count) > 0);
    if (!atomic_dec_and_test(&gem_impl->info.ref_count)) {

    munmap(gem->vaddr, gem->size);

    vigs_drm_gem_close(gem->dev, gem->handle);
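
/*
 * Exports a flink name for the GEM object (DRM_IOCTL_GEM_FLINK) so that
 * other processes can open it, e.g. through vigs_drm_surface_open().
 */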
int vigs_drm_gem_get_name(struct vigs_drm_gem *gem)
    struct drm_gem_flink req =
        .handle = gem->handle,

    ret = drmIoctl(gem->dev->fd, DRM_IOCTL_GEM_FLINK, &req);

    gem->name = req.name;
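
/*
 * Maps the GEM object into the caller's address space through
 * DRM_IOCTL_VIGS_GEM_MAP; track_access tells the kernel whether CPU access
 * to the mapping should be tracked. vigs_drm_gem_wait blocks until the GPU
 * is done with the object.
 */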
int vigs_drm_gem_map(struct vigs_drm_gem *gem, int track_access)
    struct drm_vigs_gem_map req =
        .handle = gem->handle,
        .track_access = track_access

    ret = drmIoctl(gem->dev->fd, DRM_IOCTL_VIGS_GEM_MAP, &req);

    gem->vaddr = (void*)req.address;

void vigs_drm_gem_unmap(struct vigs_drm_gem *gem)
    munmap(gem->vaddr, gem->size);

int vigs_drm_gem_wait(struct vigs_drm_gem *gem)
    struct drm_vigs_gem_wait req =
        .handle = gem->handle,

    ret = drmIoctl(gem->dev->fd, DRM_IOCTL_VIGS_GEM_WAIT, &req);
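
/*
 * Creates a host-side surface with the given width/height/stride/format and
 * wraps the resulting GEM object; the surface id reported by the kernel is
 * stored in base.id.
 */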
int vigs_drm_surface_create(struct vigs_drm_device *dev,
                            struct vigs_drm_surface **sfc)
    struct vigs_drm_surface_impl *sfc_impl;
    struct drm_vigs_create_surface req =

    sfc_impl = calloc(sizeof(*sfc_impl), 1);

    ret = drmIoctl(dev->fd, DRM_IOCTL_VIGS_CREATE_SURFACE, &req);

    vigs_drm_gem_impl_init((struct vigs_drm_gem_impl*)sfc_impl,

    sfc_impl->base.width = width;
    sfc_impl->base.height = height;
    sfc_impl->base.stride = stride;
    sfc_impl->base.format = format;
    sfc_impl->base.id = req.id;

    *sfc = &sfc_impl->base;
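
/*
 * Opens an existing surface by its flink name: DRM_IOCTL_GEM_OPEN resolves
 * the name to a handle, then DRM_IOCTL_VIGS_SURFACE_INFO fills in the
 * surface geometry and id. On failure the freshly opened handle is closed
 * again.
 */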
int vigs_drm_surface_open(struct vigs_drm_device *dev,
                          struct vigs_drm_surface **sfc)
    struct vigs_drm_surface_impl *sfc_impl;
    struct drm_gem_open req =
    struct drm_vigs_surface_info info_req;

    sfc_impl = calloc(sizeof(*sfc_impl), 1);

    ret = drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req);

    info_req.handle = req.handle;

    ret = drmIoctl(dev->fd, DRM_IOCTL_VIGS_SURFACE_INFO, &info_req);

    vigs_drm_gem_impl_init((struct vigs_drm_gem_impl*)sfc_impl,

    sfc_impl->base.width = info_req.width;
    sfc_impl->base.height = info_req.height;
    sfc_impl->base.stride = info_req.stride;
    sfc_impl->base.format = info_req.format;
    sfc_impl->base.id = info_req.id;

    *sfc = &sfc_impl->base;

    vigs_drm_gem_close(dev, req.handle);
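
/*
 * CPU access helpers: a surface can be marked GPU-dirty, and CPU reads or
 * writes of the mapped memory are bracketed by start_access/end_access so
 * the kernel can keep the mapping and the host-side surface in sync.
 */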
int vigs_drm_surface_set_gpu_dirty(struct vigs_drm_surface *sfc)
    struct drm_vigs_surface_set_gpu_dirty req =
        .handle = sfc->gem.handle

    ret = drmIoctl(sfc->gem.dev->fd, DRM_IOCTL_VIGS_SURFACE_SET_GPU_DIRTY, &req);

    return (ret != 0) ? -errno : 0;

int vigs_drm_surface_start_access(struct vigs_drm_surface *sfc,
    struct drm_vigs_surface_start_access req =
        .address = (unsigned long)sfc->gem.vaddr,

    ret = drmIoctl(sfc->gem.dev->fd, DRM_IOCTL_VIGS_SURFACE_START_ACCESS, &req);

    return (ret != 0) ? -errno : 0;

int vigs_drm_surface_end_access(struct vigs_drm_surface *sfc,
    struct drm_vigs_surface_end_access req =
        .address = (unsigned long)sfc->gem.vaddr,

    ret = drmIoctl(sfc->gem.dev->fd, DRM_IOCTL_VIGS_SURFACE_END_ACCESS, &req);

    return (ret != 0) ? -errno : 0;
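
/*
 * Execbuffers are GEM objects whose contents are submitted to the host with
 * DRM_IOCTL_VIGS_EXEC; they can be created locally or opened by flink name.
 */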
int vigs_drm_execbuffer_create(struct vigs_drm_device *dev,
                               struct vigs_drm_execbuffer **execbuffer)
    struct vigs_drm_execbuffer_impl *execbuffer_impl;
    struct drm_vigs_create_execbuffer req =

    execbuffer_impl = calloc(sizeof(*execbuffer_impl), 1);

    if (!execbuffer_impl) {

    ret = drmIoctl(dev->fd, DRM_IOCTL_VIGS_CREATE_EXECBUFFER, &req);

    vigs_drm_gem_impl_init((struct vigs_drm_gem_impl*)execbuffer_impl,

    *execbuffer = &execbuffer_impl->base;

    free(execbuffer_impl);

int vigs_drm_execbuffer_open(struct vigs_drm_device *dev,
                             struct vigs_drm_execbuffer **execbuffer)
    struct vigs_drm_execbuffer_impl *execbuffer_impl;
    struct drm_gem_open req =

    execbuffer_impl = calloc(sizeof(*execbuffer_impl), 1);

    if (!execbuffer_impl) {

    ret = drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req);

    vigs_drm_gem_impl_init((struct vigs_drm_gem_impl*)execbuffer_impl,

    *execbuffer = &execbuffer_impl->base;

    free(execbuffer_impl);

int vigs_drm_execbuffer_exec(struct vigs_drm_execbuffer *execbuffer)
    struct drm_vigs_exec req =
        .handle = execbuffer->gem.handle

    ret = drmIoctl(execbuffer->gem.dev->fd, DRM_IOCTL_VIGS_EXEC, &req);

    return (ret != 0) ? -errno : 0;
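
/*
 * Fences are kernel objects (not GEM-backed): creation returns a handle and
 * a sequence number, references are counted in user space, and the kernel
 * reference is dropped through DRM_IOCTL_VIGS_FENCE_UNREF once the last user
 * reference goes away.
 */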
int vigs_drm_fence_create(struct vigs_drm_device *dev,
                          struct vigs_drm_fence **fence)
    struct vigs_drm_fence_impl *fence_impl;
    struct drm_vigs_create_fence req =

    fence_impl = calloc(sizeof(*fence_impl), 1);

    ret = drmIoctl(dev->fd, DRM_IOCTL_VIGS_CREATE_FENCE, &req);

    atomic_set(&fence_impl->ref_count, 1);
    fence_impl->base.dev = dev;
    fence_impl->base.handle = req.handle;
    fence_impl->base.seq = req.seq;
    fence_impl->base.signaled = 0;

    *fence = &fence_impl->base;

void vigs_drm_fence_ref(struct vigs_drm_fence *fence)
    struct vigs_drm_fence_impl *fence_impl;

    fence_impl = vigs_containerof(fence, struct vigs_drm_fence_impl, base);

    atomic_inc(&fence_impl->ref_count);

void vigs_drm_fence_unref(struct vigs_drm_fence *fence)
    struct vigs_drm_fence_impl *fence_impl;
    struct drm_vigs_fence_unref req;

    fence_impl = vigs_containerof(fence, struct vigs_drm_fence_impl, base);

    assert(atomic_read(&fence_impl->ref_count) > 0);
    if (!atomic_dec_and_test(&fence_impl->ref_count)) {

    req.handle = fence->handle;

    drmIoctl(fence->dev->fd, DRM_IOCTL_VIGS_FENCE_UNREF, &req);

int vigs_drm_fence_wait(struct vigs_drm_fence *fence)
    struct drm_vigs_fence_wait req =
        .handle = fence->handle

    ret = drmIoctl(fence->dev->fd, DRM_IOCTL_VIGS_FENCE_WAIT, &req);

    return (ret != 0) ? -errno : 0;
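
/*
 * Non-blocking variant of vigs_drm_fence_wait: once the kernel reports the
 * fence as signaled, the result is cached in fence->signaled so repeated
 * checks can return without issuing another ioctl.
 */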
int vigs_drm_fence_check(struct vigs_drm_fence *fence)
    struct drm_vigs_fence_signaled req =
        .handle = fence->handle

    if (fence->signaled) {

    ret = drmIoctl(fence->dev->fd, DRM_IOCTL_VIGS_FENCE_SIGNALED, &req);

    fence->signaled = req.signaled;
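
/*
 * Illustrative usage sketch, not part of the library and not built: it only
 * calls entry points whose signatures appear above. The header name "vigs.h"
 * and the surrounding ownership conventions are assumptions of the example;
 * error handling is abbreviated.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include "vigs.h" /* assumed public API header name */

/* Open a device wrapper on an already-opened DRM fd and report the
 * protocol version spoken by the kernel driver. */
static int example_open_device(int fd, struct vigs_drm_device **dev)
{
    uint32_t protocol_version = 0;
    int ret = vigs_drm_device_create(fd, dev);

    if (ret != 0) {
        return ret;
    }

    if (vigs_drm_device_get_protocol_version(*dev, &protocol_version) == 0) {
        printf("VIGS protocol version %u\n", protocol_version);
    }

    return 0;
}

/* Map a surface obtained elsewhere (vigs_drm_surface_create/_open), wait for
 * the GPU to finish with it, clear it on the CPU, then unmap it again. */
static int example_clear_surface(struct vigs_drm_surface *sfc)
{
    int ret = vigs_drm_gem_map(&sfc->gem, 1 /* track CPU access */);

    if (ret != 0) {
        return ret;
    }

    vigs_drm_gem_wait(&sfc->gem);
    memset(sfc->gem.vaddr, 0, sfc->gem.size);
    vigs_drm_gem_unmap(&sfc->gem);

    return 0;
}
#endif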