1 /* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
4 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
26 * Rob Clark <robclark@freedesktop.org>
29 #include "freedreno_drmif.h"
30 #include "freedreno_priv.h"
34 static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
36 /* set buffer name, and add to table, call w/ table_lock held: */
37 static void set_name(struct fd_bo *bo, uint32_t name)
/* NOTE(review): lines 38-39 are not visible in this chunk (opening brace,
 * and presumably a `bo->name = name` assignment) -- confirm against the
 * full source before relying on this view.
 */
40 /* add ourself into the per-device name table (keyed by flink name), so
 * a later fd_bo_from_name() on this device returns this same bo: */
41 drmHashInsert(bo->dev->name_table, name, bo);
44 /* lookup a buffer, call w/ table_lock held: */
45 static struct fd_bo * lookup_bo(void *tbl, uint32_t key)
/* Returns the bo stored under `key` in the given drmHash table (handle or
 * name table), or NULL if not present.  drmHashLookup() returns 0 on hit.
 * NOTE(review): the refcount-increment and return statements are elided
 * from this view (the comment on line 49 implies them).
 */
47 struct fd_bo *bo = NULL;
48 if (!drmHashLookup(tbl, key, (void **)&bo)) {
49 /* found, incr refcnt and return: */
55 /* allocate a new buffer object, call w/ table_lock held */
56 static struct fd_bo * bo_from_handle(struct fd_device *dev,
57 uint32_t size, uint32_t handle)
/* Wraps an existing GEM handle in a freshly-allocated fd_bo, takes a
 * reference on the device, seeds refcnt at 1, and registers the bo in the
 * per-device handle table.  NOTE(review): several lines are elided here;
 * the GEM_CLOSE ioctl below (line 65) appears to be the calloc-failure
 * path that closes the handle so it is not leaked -- confirm the guard.
 */
60 struct fd_bo *bo = calloc(1, sizeof(*bo));
62 struct drm_gem_close req = {
65 drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
68 bo->dev = fd_device_ref(dev);
/* caller gets the initial reference: */
71 atomic_set(&bo->refcnt, 1);
72 /* add ourself into the handle table: */
73 drmHashInsert(dev->handle_table, handle, bo);
/* init the per-ring bookkeeping lists: */
74 for (i = 0; i < ARRAY_SIZE(bo->list); i++)
75 list_inithead(&bo->list[i]);
/* Tell the KGSL driver which memory type backs this bo, derived from the
 * DRM_FREEDRENO_GEM_TYPE_MEM_* bits of `flags`.  Returns the ioctl result
 * (0 on success).
 */
79 static int set_memtype(struct fd_bo *bo, uint32_t flags)
81 struct drm_kgsl_gem_memtype req = {
83 .type = flags & DRM_FREEDRENO_GEM_TYPE_MEM_MASK,
86 return drmCommandWrite(bo->dev->fd, DRM_KGSL_GEM_SETMEMTYPE,
/* Ensure the bo's backing pages are allocated (DRM_KGSL_GEM_ALLOC) and
 * record the mmap offset reported by the kernel in bo->offset.  Returns
 * 0 on success; logs and (presumably, elided) returns an error otherwise.
 */
90 static int bo_alloc(struct fd_bo *bo)
93 struct drm_kgsl_gem_alloc req = {
98 /* if the buffer is already backed by pages then this
99 * doesn't actually do anything (other than giving us
102 ret = drmCommandWriteRead(bo->dev->fd, DRM_KGSL_GEM_ALLOC,
105 ERROR_MSG("alloc failed: %s", strerror(errno));
109 bo->offset = req.offset;
/* Allocate a new buffer object of at least `size` bytes (rounded up to a
 * 4k page) with the memory type given in `flags`.  Returns the new bo, or
 * (per the elided error paths) NULL on failure.
 */
114 struct fd_bo * fd_bo_new(struct fd_device *dev,
115 uint32_t size, uint32_t flags)
117 struct drm_kgsl_gem_create req = {
118 .size = ALIGN(size, 4096),
120 struct fd_bo *bo = NULL;
122 if (drmCommandWriteRead(dev->fd, DRM_KGSL_GEM_CREATE,
123 &req, sizeof(req))) {
/* table_lock protects the device hash tables bo_from_handle() touches: */
127 pthread_mutex_lock(&table_lock);
128 bo = bo_from_handle(dev, size, req.handle);
129 pthread_mutex_unlock(&table_lock);
/* apply the requested memtype; failure path is elided from this view: */
134 if (set_memtype(bo, flags)) {
145 /* don't use this... it is just needed to get a bo from the
146 * framebuffer (pre-dmabuf)
/* Wraps the fbdev framebuffer fd in a GEM handle (DRM_KGSL_GEM_CREATE_FD)
 * and, if the kernel did not assign a gpuaddr, maps the fb memory into the
 * GPU via IOCTL_KGSL_MAP_USER_MEM as a workaround.
 * NOTE(review): the inner `struct kgsl_map_user_mem req` (line 171) shadows
 * the outer `struct drm_kgsl_gem_create_fd req` (line 151) -- legal C but
 * easy to misread.  Also no MAP_FAILED check on `fbmem` is visible here
 * before it is passed as hostptr -- confirm in the full source.
 */
148 struct fd_bo * fd_bo_from_fbdev(struct fd_pipe *pipe,
149 int fbfd, uint32_t size)
151 struct drm_kgsl_gem_create_fd req = {
156 if (drmCommandWriteRead(pipe->dev->fd, DRM_KGSL_GEM_CREATE_FD,
157 &req, sizeof(req))) {
161 pthread_mutex_lock(&table_lock);
162 bo = bo_from_handle(pipe->dev, size, req.handle);
164 /* this is fugly, but works around a bug in the kernel..
165 * priv->memdesc.size never gets set, so getbufinfo ioctl
166 * thinks the buffer hasn't been allocated and fails
168 if (bo && !fd_bo_gpuaddr(bo, 0)) {
169 void *fbmem = mmap(NULL, size, PROT_READ | PROT_WRITE,
170 MAP_SHARED, fbfd, 0);
171 struct kgsl_map_user_mem req = {
172 .memtype = KGSL_USER_MEM_TYPE_ADDR,
175 .hostptr = (unsigned long)fbmem,
178 ret = ioctl(pipe->fd, IOCTL_KGSL_MAP_USER_MEM, &req);
180 ERROR_MSG("mapping user mem failed: %s",
/* cache the GPU address the kernel assigned for the mapped fb memory: */
184 bo->gpuaddr = req.gpuaddr;
187 pthread_mutex_unlock(&table_lock);
191 pthread_mutex_unlock(&table_lock);
/* Open a bo shared from another process by its flink name.  Checks the
 * name table (and, after GEM_OPEN, the handle table) first so an already-
 * open bo is returned with just a refcount bump instead of being wrapped
 * twice.  The whole lookup/open/insert sequence runs under table_lock.
 */
197 struct fd_bo * fd_bo_from_name(struct fd_device *dev, uint32_t name)
199 struct drm_gem_open req = {
204 pthread_mutex_lock(&table_lock);
206 /* check name table first, to see if bo is already open: */
207 bo = lookup_bo(dev->name_table, name);
211 if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
212 ERROR_MSG("gem-open failed: %s", strerror(errno));
/* GEM_OPEN may return a handle we already track (same bo flinked and
 * opened before), so consult the handle table before wrapping it: */
216 bo = lookup_bo(dev->handle_table, req.handle);
220 bo = bo_from_handle(dev, req.size, req.handle);
225 pthread_mutex_unlock(&table_lock);
/* Take an additional reference on the bo; release with fd_bo_del().
 * (The `return bo;` is elided from this view.)
 */
230 struct fd_bo * fd_bo_ref(struct fd_bo *bo)
232 atomic_inc(&bo->refcnt);
/* Drop a reference; on the last reference, unmap any CPU mapping, remove
 * the bo from both device hash tables, close the GEM handle, release the
 * device reference, and (elided) free the bo.
 */
236 void fd_bo_del(struct fd_bo *bo)
238 if (!atomic_dec_and_test(&bo->refcnt))
242 munmap(bo->map, bo->size);
245 struct drm_gem_close req = {
246 .handle = bo->handle,
248 pthread_mutex_lock(&table_lock);
249 drmHashDelete(bo->dev->handle_table, bo->handle);
/* NOTE(review): guard for bo->name != 0 is presumably on the elided line
 * 250 -- unnamed bos were never inserted into the name table.
 */
251 drmHashDelete(bo->dev->name_table, bo->name);
252 drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
253 pthread_mutex_unlock(&table_lock);
256 fd_device_del(bo->dev);
/* Flink the bo and return its global name in *name so it can be shared
 * with another process (fd_bo_from_name()).  On success the name is also
 * recorded in the device name table via set_name().  Returns 0 on success.
 */
260 int fd_bo_get_name(struct fd_bo *bo, uint32_t *name)
263 struct drm_gem_flink req = {
264 .handle = bo->handle,
268 ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
273 pthread_mutex_lock(&table_lock);
274 set_name(bo, req.name);
275 pthread_mutex_unlock(&table_lock);
/* Accessor: the bo's GEM handle (body elided from this view). */
283 uint32_t fd_bo_handle(struct fd_bo *bo)
/* Accessor: the bo's size in bytes (body elided from this view). */
288 uint32_t fd_bo_size(struct fd_bo *bo)
/* Return a CPU mapping of the bo, creating it lazily via mmap on the drm
 * fd at the bo's allocation offset.  The visible lines cache the result in
 * bo->map; MAP_FAILED is detected and logged (recovery path elided).
 */
293 void * fd_bo_map(struct fd_bo *bo)
303 bo->map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
304 bo->dev->fd, bo->offset);
305 if (bo->map == MAP_FAILED) {
306 ERROR_MSG("mmap failed: %s", strerror(errno));
/* Return the GPU virtual address of the bo plus `offset`, querying the
 * kernel (DRM_KGSL_GEM_GET_BUFINFO) on first use and caching the result
 * in bo->gpuaddr.  Note only gpuaddr[0] (first aperture entry) is used.
 */
313 uint32_t fd_bo_gpuaddr(struct fd_bo *bo, uint32_t offset)
316 struct drm_kgsl_gem_bufinfo req = {
317 .handle = bo->handle,
326 ret = drmCommandWriteRead(bo->dev->fd, DRM_KGSL_GEM_GET_BUFINFO,
329 ERROR_MSG("get bufinfo failed: %s", strerror(errno));
333 bo->gpuaddr = req.gpuaddr[0];
335 return bo->gpuaddr + offset;
339 * Super-cheezy way to synchronize between mesa and ddx.. the
340 * SET_ACTIVE ioctl gives us a way to stash a 32b # w/ a GEM bo, and
341 * GET_BUFINFO gives us a way to retrieve it. We use this to stash
342 * the timestamp of the last ISSUEIBCMDS on the buffer.
344 * To avoid an obscene amount of syscalls, we:
345 * 1) Only set the timestamp for buffers w/ a flink name, ie.
346 * only buffers shared across processes. This is enough to
347 * catch the DRI2 buffers.
348 * 2) Only set the timestamp for buffers submitted to the 3d ring
349 * and only check the timestamps on buffers submitted to the
350 * 2d ring. This should be enough to handle synchronizing of
351 * presentation blit. We could do synchronization in the other
352 * direction too, but that would be problematic if we are using
353 * the 3d ring from DDX, since client side wouldn't know this.
355 * The waiting on timestamp happens before flush, and setting of
356 * timestamp happens after flush. It is transparent to the user
357 * of libdrm_freedreno as all the tracking of buffers happens via
/* Stash `timestamp` with the bo via the SET_ACTIVE ioctl (see the sync
 * strategy comment above); fd_bo_get_timestamp() retrieves it.
 * NOTE(review): the `fb_` prefix looks like a typo for `fd_` -- every
 * other public symbol here uses `fd_`.  Renaming would break existing
 * callers, so flagging rather than fixing; confirm against the header.
 */
361 void fb_bo_set_timestamp(struct fd_bo *bo, uint32_t timestamp)
364 struct drm_kgsl_gem_active req = {
365 .handle = bo->handle,
370 ret = drmCommandWrite(bo->dev->fd, DRM_KGSL_GEM_SET_ACTIVE,
373 ERROR_MSG("set active failed: %s", strerror(errno));
/* Retrieve the timestamp previously stashed on the bo (see
 * fb_bo_set_timestamp()) via GET_BUFINFO's `active` field.  Returns 0 on
 * ioctl failure, since `timestamp` keeps its initializer in that case.
 */
378 uint32_t fd_bo_get_timestamp(struct fd_bo *bo)
380 uint32_t timestamp = 0;
382 struct drm_kgsl_gem_bufinfo req = {
383 .handle = bo->handle,
387 ret = drmCommandWriteRead(bo->dev->fd, DRM_KGSL_GEM_GET_BUFINFO,
390 ERROR_MSG("get bufinfo failed: %s", strerror(errno));
394 timestamp = req.active;