1 /**************************************************************************
3 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, Tx., USA
5 * Copyright 2009 VMware, Inc., Palo Alto, CA., USA
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial portions
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
23 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
24 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
25 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
26 * USE OR OTHER DEALINGS IN THE SOFTWARE.
28 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
41 #include "wsbm_pool.h"
43 #include "wsbm_priv.h"
44 #include "wsbm_manager.h"
45 #include "ttm/ttm_placement_user.h"
/*
 * Issue a write-only DRM command ioctl, transparently retrying while the
 * kernel reports that the call was interrupted (-EAGAIN / -ERESTART).
 * _ret receives the final return code.
 */
#define DRMRESTARTCOMMANDWRITE(_fd, _val, _arg, _ret)			\
	do {								\
		(_ret) = drmCommandWrite(_fd, _val, &(_arg), sizeof(_arg)); \
	} while ((_ret) == -EAGAIN || (_ret) == -ERESTART)
/*
 * Issue a write-read DRM command ioctl, transparently retrying while the
 * kernel reports that the call was interrupted (-EAGAIN / -ERESTART).
 * _ret receives the final return code; _arg is updated with reply data.
 */
#define DRMRESTARTCOMMANDWRITEREAD(_fd, _val, _arg, _ret)		\
	do {								\
		(_ret) = drmCommandWriteRead(_fd, _val, &(_arg), sizeof(_arg)); \
	} while ((_ret) == -EAGAIN || (_ret) == -ERESTART)
/*
 * Buffer pool implementation using DRM buffer objects as wsbm buffer objects.
 */
63 struct _WsbmBufStorage buf;
64 struct _WsbmCond event;
67 * Remains constant after creation.
70 uint64_t requestedSize;
75 * Protected by the kernel lock.
78 struct _WsbmKernelBuf kBuf;
81 * Protected by the mutex.
92 struct _WsbmBufferPool pool;
93 unsigned int pageSize;
94 unsigned int devOffset;
97 static inline struct _TTMPool *
98 ttmGetPool(struct _TTMBuffer *dBuf)
100 return containerOf(dBuf->buf.pool, struct _TTMPool, pool);
103 static inline struct _TTMBuffer *
104 ttmBuffer(struct _WsbmBufStorage *buf)
106 return containerOf(buf, struct _TTMBuffer, buf);
109 static struct _WsbmBufStorage *
110 pool_create(struct _WsbmBufferPool *pool,
111 unsigned long size, uint32_t placement, unsigned alignment)
113 struct _TTMBuffer *dBuf = (struct _TTMBuffer *)
114 calloc(1, sizeof(*dBuf));
115 struct _TTMPool *ttmPool = containerOf(pool, struct _TTMPool, pool);
117 unsigned pageSize = ttmPool->pageSize;
118 union ttm_pl_create_arg arg;
123 if ((alignment > pageSize) && (alignment % pageSize))
126 ret = wsbmBufStorageInit(&dBuf->buf, pool);
130 ret = WSBM_COND_INIT(&dBuf->event);
135 arg.req.placement = placement;
136 arg.req.page_alignment = alignment / pageSize;
138 DRMRESTARTCOMMANDWRITEREAD(pool->fd, ttmPool->devOffset + TTM_PL_CREATE,
144 dBuf->requestedSize = size;
145 dBuf->kBuf.gpuOffset = arg.rep.gpu_offset;
146 dBuf->mapHandle = arg.rep.map_handle;
147 dBuf->realSize = arg.rep.bo_size;
148 dBuf->kBuf.placement = arg.rep.placement;
149 dBuf->kBuf.handle = arg.rep.handle;
154 WSBM_COND_FREE(&dBuf->event);
156 wsbmBufStorageTakedown(&dBuf->buf);
162 static struct _WsbmBufStorage *
163 pool_reference(struct _WsbmBufferPool *pool, unsigned handle)
165 struct _TTMBuffer *dBuf = (struct _TTMBuffer *)calloc(1, sizeof(*dBuf));
166 struct _TTMPool *ttmPool = containerOf(pool, struct _TTMPool, pool);
167 union ttm_pl_reference_arg arg;
173 ret = wsbmBufStorageInit(&dBuf->buf, pool);
177 ret = WSBM_COND_INIT(&dBuf->event);
181 arg.req.handle = handle;
182 ret = drmCommandWriteRead(pool->fd, ttmPool->devOffset + TTM_PL_REFERENCE,
188 dBuf->requestedSize = arg.rep.bo_size;
189 dBuf->kBuf.gpuOffset = arg.rep.gpu_offset;
190 dBuf->mapHandle = arg.rep.map_handle;
191 dBuf->realSize = arg.rep.bo_size;
192 dBuf->kBuf.placement = arg.rep.placement;
193 dBuf->kBuf.handle = arg.rep.handle;
194 dBuf->kBuf.fence_type_mask = arg.rep.sync_object_arg;
199 WSBM_COND_FREE(&dBuf->event);
201 wsbmBufStorageTakedown(&dBuf->buf);
208 pool_destroy(struct _WsbmBufStorage **buf)
210 struct _TTMBuffer *dBuf = ttmBuffer(*buf);
211 struct _TTMPool *ttmPool = ttmGetPool(dBuf);
212 struct ttm_pl_reference_req arg;
215 if (dBuf->virtual != NULL) {
216 (void)munmap(dBuf->virtual, dBuf->requestedSize);
217 dBuf->virtual = NULL;
219 arg.handle = dBuf->kBuf.handle;
220 (void)drmCommandWrite(dBuf->buf.pool->fd,
221 ttmPool->devOffset + TTM_PL_UNREF,
224 WSBM_COND_FREE(&dBuf->event);
225 wsbmBufStorageTakedown(&dBuf->buf);
230 syncforcpu_locked(struct _WsbmBufStorage *buf, unsigned mode)
233 struct _TTMBuffer *dBuf = ttmBuffer(buf);
234 struct _TTMPool *ttmPool = ttmGetPool(dBuf);
235 unsigned int readers;
236 unsigned int writers;
239 while (dBuf->syncInProgress)
240 WSBM_COND_WAIT(&dBuf->event, &buf->mutex);
242 readers = dBuf->readers;
243 writers = dBuf->writers;
245 if ((mode & WSBM_SYNCCPU_READ) && (++dBuf->readers == 1))
246 kmode |= TTM_PL_SYNCCPU_MODE_READ;
248 if ((mode & WSBM_SYNCCPU_WRITE) && (++dBuf->writers == 1))
249 kmode |= TTM_PL_SYNCCPU_MODE_WRITE;
252 struct ttm_pl_synccpu_arg arg;
254 if (mode & WSBM_SYNCCPU_DONT_BLOCK)
255 kmode |= TTM_PL_SYNCCPU_MODE_NO_BLOCK;
257 dBuf->syncInProgress = 1;
260 * This might be a lengthy wait, so
264 WSBM_MUTEX_UNLOCK(&buf->mutex);
266 arg.handle = dBuf->kBuf.handle;
267 arg.access_mode = kmode;
268 arg.op = TTM_PL_SYNCCPU_OP_GRAB;
270 DRMRESTARTCOMMANDWRITE(dBuf->buf.pool->fd,
271 ttmPool->devOffset + TTM_PL_SYNCCPU, arg, ret);
273 WSBM_MUTEX_LOCK(&buf->mutex);
274 dBuf->syncInProgress = 0;
275 WSBM_COND_BROADCAST(&dBuf->event);
278 dBuf->readers = readers;
279 dBuf->writers = writers;
287 releasefromcpu_locked(struct _WsbmBufStorage *buf, unsigned mode)
290 struct _TTMBuffer *dBuf = ttmBuffer(buf);
291 struct _TTMPool *ttmPool = ttmGetPool(dBuf);
294 while (dBuf->syncInProgress)
295 WSBM_COND_WAIT(&dBuf->event, &buf->mutex);
297 if ((mode & WSBM_SYNCCPU_READ) && (--dBuf->readers == 0))
298 kmode |= TTM_PL_SYNCCPU_MODE_READ;
300 if ((mode & WSBM_SYNCCPU_WRITE) && (--dBuf->writers == 0))
301 kmode |= TTM_PL_SYNCCPU_MODE_WRITE;
304 struct ttm_pl_synccpu_arg arg;
306 arg.handle = dBuf->kBuf.handle;
307 arg.access_mode = kmode;
308 arg.op = TTM_PL_SYNCCPU_OP_RELEASE;
310 DRMRESTARTCOMMANDWRITE(dBuf->buf.pool->fd,
311 ttmPool->devOffset + TTM_PL_SYNCCPU, arg, ret);
319 pool_syncforcpu(struct _WsbmBufStorage *buf, unsigned mode)
323 WSBM_MUTEX_LOCK(&buf->mutex);
324 ret = syncforcpu_locked(buf, mode);
325 WSBM_MUTEX_UNLOCK(&buf->mutex);
330 pool_releasefromcpu(struct _WsbmBufStorage *buf, unsigned mode)
332 WSBM_MUTEX_LOCK(&buf->mutex);
333 (void)releasefromcpu_locked(buf, mode);
334 WSBM_MUTEX_UNLOCK(&buf->mutex);
338 pool_map(struct _WsbmBufStorage *buf, unsigned mode, void **virtual)
340 struct _TTMBuffer *dBuf = ttmBuffer(buf);
344 WSBM_MUTEX_LOCK(&buf->mutex);
347 * mmaps are expensive, so we only really unmap if
348 * we destroy the buffer.
351 if (dBuf->virtual == NULL) {
352 virt = mmap(0, dBuf->requestedSize,
353 PROT_READ | PROT_WRITE, MAP_SHARED,
354 buf->pool->fd, dBuf->mapHandle);
355 if (virt == MAP_FAILED) {
359 dBuf->virtual = virt;
362 *virtual = dBuf->virtual;
365 WSBM_MUTEX_UNLOCK(&buf->mutex);
/*
 * Unmap hook: intentionally a no-op. The CPU mapping is cached for the
 * buffer's lifetime and only torn down in pool_destroy (mmaps are
 * expensive — see pool_map).
 */
static void
pool_unmap(struct _WsbmBufStorage *buf __attribute__ ((unused)))
{
    ;
}
377 pool_offset(struct _WsbmBufStorage *buf)
379 struct _TTMBuffer *dBuf = ttmBuffer(buf);
381 return dBuf->kBuf.gpuOffset;
/*
 * Offset within a user-space sub-pool. TTM buffers are standalone kernel
 * objects, not carved out of a user pool, so this is always 0.
 * NOTE(review): body was dropped in this extraction; restored from the
 * upstream no-op semantics — verify against VCS.
 */
static unsigned long
pool_poolOffset(struct _WsbmBufStorage *buf __attribute__ ((unused)))
{
    return 0;
}
391 pool_placement(struct _WsbmBufStorage *buf)
393 struct _TTMBuffer *dBuf = ttmBuffer(buf);
395 return dBuf->kBuf.placement;
399 pool_size(struct _WsbmBufStorage *buf)
401 struct _TTMBuffer *dBuf = ttmBuffer(buf);
403 return dBuf->realSize;
static void
pool_fence(struct _WsbmBufStorage *buf __attribute__ ((unused)),
	   struct _WsbmFenceObject *fence __attribute__ ((unused)))
{
    /*
     * Noop. The kernel handles all fencing.
     */
}
415 pool_waitIdle(struct _WsbmBufStorage *buf, int lazy)
417 struct _TTMBuffer *dBuf = ttmBuffer(buf);
418 struct _TTMPool *ttmPool = ttmGetPool(dBuf);
419 struct ttm_pl_waitidle_arg req;
420 struct _WsbmBufferPool *pool = buf->pool;
423 req.handle = dBuf->kBuf.handle;
424 req.mode = (lazy) ? TTM_PL_WAITIDLE_MODE_LAZY : 0;
426 DRMRESTARTCOMMANDWRITE(pool->fd, ttmPool->devOffset + TTM_PL_WAITIDLE,
433 pool_takedown(struct _WsbmBufferPool *pool)
435 struct _TTMPool *ttmPool = containerOf(pool, struct _TTMPool, pool);
441 pool_setStatus(struct _WsbmBufStorage *buf, uint32_t set_placement,
442 uint32_t clr_placement)
444 struct _TTMBuffer *dBuf = ttmBuffer(buf);
445 struct _TTMPool *ttmPool = ttmGetPool(dBuf);
446 union ttm_pl_setstatus_arg arg;
447 struct ttm_pl_setstatus_req *req = &arg.req;
448 struct ttm_pl_rep *rep = &arg.rep;
449 struct _WsbmBufferPool *pool = buf->pool;
452 req->handle = dBuf->kBuf.handle;
453 req->set_placement = set_placement;
454 req->clr_placement = clr_placement;
456 DRMRESTARTCOMMANDWRITEREAD(pool->fd,
457 ttmPool->devOffset + TTM_PL_SETSTATUS,
461 dBuf->kBuf.gpuOffset = rep->gpu_offset;
462 dBuf->kBuf.placement = rep->placement;
468 static struct _WsbmKernelBuf *
469 pool_kernel(struct _WsbmBufStorage *buf)
471 return (void *)&ttmBuffer(buf)->kBuf;
474 struct _WsbmBufferPool *
475 wsbmTTMPoolInit(int fd, unsigned int devOffset)
477 struct _TTMPool *ttmPool;
478 struct _WsbmBufferPool *pool;
480 ttmPool = (struct _TTMPool *)calloc(1, sizeof(*ttmPool));
485 ttmPool->pageSize = getpagesize();
486 ttmPool->devOffset = devOffset;
487 pool = &ttmPool->pool;
490 pool->map = &pool_map;
491 pool->unmap = &pool_unmap;
492 pool->syncforcpu = &pool_syncforcpu;
493 pool->releasefromcpu = &pool_releasefromcpu;
494 pool->destroy = &pool_destroy;
495 pool->offset = &pool_offset;
496 pool->poolOffset = &pool_poolOffset;
497 pool->placement = &pool_placement;
498 pool->size = &pool_size;
499 pool->create = &pool_create;
500 pool->fence = &pool_fence;
501 pool->kernel = &pool_kernel;
502 pool->validate = NULL;
503 pool->unvalidate = NULL;
504 pool->waitIdle = &pool_waitIdle;
505 pool->takeDown = &pool_takedown;
506 pool->createByReference = &pool_reference;
507 pool->setStatus = &pool_setStatus;