1 /* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
/*
 * Copyright (C) 2011 Texas Instruments, Inc
 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <rob@ti.com>
 */
34 #include <linux/stddef.h>
35 #include <linux/types.h>
44 #include <xf86atomic.h>
47 #include "omap_drmif.h"
/* Round x up to the next multiple of y; y must be a power of two
 * (the mask trick below only works for power-of-two alignments). */
#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
/* Page size assumed by the tiled-buffer size computation below. */
#define PAGE_SIZE 4096
/* Serializes access to dev_table and to each device's handle_table. */
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
/* Maps drm fd -> 'struct omap_device' so repeated omap_device_new()
 * calls on the same fd share (and refcount) one device object. */
static void * dev_table;
60 /* The handle_table is used to track GEM bo handles associated w/
61 * this fd. This is needed, in particular, when importing
62 * dmabuf's because we don't want multiple 'struct omap_bo's
63 * floating around with the same handle. Otherwise, when the
64 * first one is omap_bo_del()'d the handle becomes no longer
65 * valid, and the remaining 'struct omap_bo's are left pointing
66 * to an invalid handle (and possible a GEM bo that is already
72 /* a GEM buffer object allocated from the DRM device */
74 struct omap_device *dev;
75 void *map; /* userspace mmap'ing (if there is one) */
78 uint32_t name; /* flink global handle (DRI2 name) */
79 uint64_t offset; /* offset to mmap() */
80 int fd; /* dmabuf handle */
84 static struct omap_device * omap_device_new_impl(int fd)
86 struct omap_device *dev = calloc(sizeof(*dev), 1);
90 atomic_set(&dev->refcnt, 1);
91 dev->handle_table = drmHashCreate();
95 drm_public struct omap_device * omap_device_new(int fd)
97 struct omap_device *dev = NULL;
99 pthread_mutex_lock(&table_lock);
102 dev_table = drmHashCreate();
104 if (drmHashLookup(dev_table, fd, (void **)&dev)) {
105 /* not found, create new device */
106 dev = omap_device_new_impl(fd);
107 drmHashInsert(dev_table, fd, dev);
109 /* found, just incr refcnt */
110 dev = omap_device_ref(dev);
113 pthread_mutex_unlock(&table_lock);
118 drm_public struct omap_device * omap_device_ref(struct omap_device *dev)
120 atomic_inc(&dev->refcnt);
124 drm_public void omap_device_del(struct omap_device *dev)
126 if (!atomic_dec_and_test(&dev->refcnt))
128 pthread_mutex_lock(&table_lock);
129 drmHashDestroy(dev->handle_table);
130 drmHashDelete(dev_table, dev->fd);
131 pthread_mutex_unlock(&table_lock);
136 omap_get_param(struct omap_device *dev, uint64_t param, uint64_t *value)
138 struct drm_omap_param req = {
143 ret = drmCommandWriteRead(dev->fd, DRM_OMAP_GET_PARAM, &req, sizeof(req));
154 omap_set_param(struct omap_device *dev, uint64_t param, uint64_t value)
156 struct drm_omap_param req = {
160 return drmCommandWrite(dev->fd, DRM_OMAP_SET_PARAM, &req, sizeof(req));
163 /* lookup a buffer from it's handle, call w/ table_lock held: */
164 static struct omap_bo * lookup_bo(struct omap_device *dev,
167 struct omap_bo *bo = NULL;
168 if (!drmHashLookup(dev->handle_table, handle, (void **)&bo)) {
169 /* found, incr refcnt and return: */
170 bo = omap_bo_ref(bo);
175 /* allocate a new buffer object, call w/ table_lock held */
176 static struct omap_bo * bo_from_handle(struct omap_device *dev,
179 struct omap_bo *bo = calloc(sizeof(*bo), 1);
181 struct drm_gem_close req = {
184 drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
187 bo->dev = omap_device_ref(dev);
189 atomic_set(&bo->refcnt, 1);
190 /* add ourselves to the handle table: */
191 drmHashInsert(dev->handle_table, handle, bo);
195 /* allocate a new buffer object */
196 static struct omap_bo * omap_bo_new_impl(struct omap_device *dev,
197 union omap_gem_size size, uint32_t flags)
199 struct omap_bo *bo = NULL;
200 struct drm_omap_gem_new req = {
205 if (size.bytes == 0) {
209 if (drmCommandWriteRead(dev->fd, DRM_OMAP_GEM_NEW, &req, sizeof(req))) {
213 pthread_mutex_lock(&table_lock);
214 bo = bo_from_handle(dev, req.handle);
215 pthread_mutex_unlock(&table_lock);
217 if (flags & OMAP_BO_TILED) {
218 bo->size = round_up(size.tiled.width, PAGE_SIZE) * size.tiled.height;
220 bo->size = size.bytes;
231 /* allocate a new (un-tiled) buffer object */
232 drm_public struct omap_bo *
233 omap_bo_new(struct omap_device *dev, uint32_t size, uint32_t flags)
235 union omap_gem_size gsize = {
238 if (flags & OMAP_BO_TILED) {
241 return omap_bo_new_impl(dev, gsize, flags);
244 /* allocate a new buffer object */
245 drm_public struct omap_bo *
246 omap_bo_new_tiled(struct omap_device *dev, uint32_t width,
247 uint32_t height, uint32_t flags)
249 union omap_gem_size gsize = {
255 if (!(flags & OMAP_BO_TILED)) {
258 return omap_bo_new_impl(dev, gsize, flags);
261 drm_public struct omap_bo *omap_bo_ref(struct omap_bo *bo)
263 atomic_inc(&bo->refcnt);
267 /* get buffer info */
268 static int get_buffer_info(struct omap_bo *bo)
270 struct drm_omap_gem_info req = {
271 .handle = bo->handle,
273 int ret = drmCommandWriteRead(bo->dev->fd, DRM_OMAP_GEM_INFO,
279 /* really all we need for now is mmap offset */
280 bo->offset = req.offset;
286 /* import a buffer object from DRI2 name */
287 drm_public struct omap_bo *
288 omap_bo_from_name(struct omap_device *dev, uint32_t name)
290 struct omap_bo *bo = NULL;
291 struct drm_gem_open req = {
295 pthread_mutex_lock(&table_lock);
297 if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
301 bo = lookup_bo(dev, req.handle);
303 bo = bo_from_handle(dev, req.handle);
307 pthread_mutex_unlock(&table_lock);
312 pthread_mutex_unlock(&table_lock);
317 /* import a buffer from dmabuf fd, does not take ownership of the
318 * fd so caller should close() the fd when it is otherwise done
319 * with it (even if it is still using the 'struct omap_bo *')
321 drm_public struct omap_bo *
322 omap_bo_from_dmabuf(struct omap_device *dev, int fd)
324 struct omap_bo *bo = NULL;
325 struct drm_prime_handle req = {
330 pthread_mutex_lock(&table_lock);
332 ret = drmIoctl(dev->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &req);
337 bo = lookup_bo(dev, req.handle);
339 bo = bo_from_handle(dev, req.handle);
342 pthread_mutex_unlock(&table_lock);
347 pthread_mutex_unlock(&table_lock);
352 /* destroy a buffer object */
353 drm_public void omap_bo_del(struct omap_bo *bo)
359 if (!atomic_dec_and_test(&bo->refcnt))
363 munmap(bo->map, bo->size);
371 struct drm_gem_close req = {
372 .handle = bo->handle,
374 pthread_mutex_lock(&table_lock);
375 drmHashDelete(bo->dev->handle_table, bo->handle);
376 drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
377 pthread_mutex_unlock(&table_lock);
380 omap_device_del(bo->dev);
385 /* get the global flink/DRI2 buffer name */
386 drm_public int omap_bo_get_name(struct omap_bo *bo, uint32_t *name)
389 struct drm_gem_flink req = {
390 .handle = bo->handle,
394 ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
407 drm_public uint32_t omap_bo_handle(struct omap_bo *bo)
412 /* caller owns the dmabuf fd that is returned and is responsible
413 * to close() it when done
415 drm_public int omap_bo_dmabuf(struct omap_bo *bo)
418 struct drm_prime_handle req = {
419 .handle = bo->handle,
420 .flags = DRM_CLOEXEC,
424 ret = drmIoctl(bo->dev->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &req);
434 drm_public uint32_t omap_bo_size(struct omap_bo *bo)
442 drm_public void *omap_bo_map(struct omap_bo *bo)
449 bo->map = mmap(0, bo->size, PROT_READ | PROT_WRITE,
450 MAP_SHARED, bo->dev->fd, bo->offset);
451 if (bo->map == MAP_FAILED) {
458 drm_public int omap_bo_cpu_prep(struct omap_bo *bo, enum omap_gem_op op)
460 struct drm_omap_gem_cpu_prep req = {
461 .handle = bo->handle,
464 return drmCommandWrite(bo->dev->fd,
465 DRM_OMAP_GEM_CPU_PREP, &req, sizeof(req));
468 drm_public int omap_bo_cpu_fini(struct omap_bo *bo, enum omap_gem_op op)
470 struct drm_omap_gem_cpu_fini req = {
471 .handle = bo->handle,
475 return drmCommandWrite(bo->dev->fd,
476 DRM_OMAP_GEM_CPU_FINI, &req, sizeof(req));