1 /**************************************************************************
3 * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
5 * Copyright 2009 VMware, Inc., Palo Alto, CA., USA
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial portions
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
23 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
24 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
25 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
26 * USE OR OTHER DEALINGS IN THE SOFTWARE.
28 **************************************************************************/
30 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
43 #include "wsbm_pool.h"
44 #include "wsbm_fencemgr.h"
45 #include "wsbm_manager.h"
47 #include "wsbm_priv.h"
50 * Malloced memory must be aligned to 16 bytes, since that's what
51 * the DMA bitblt requires.
/* Slack added to every system-memory allocation so the usable start
 * address can be rounded up to a 16-byte boundary. */
54 #define WSBM_USER_ALIGN_ADD 16
/* Round a malloc()ed pointer up to the next 16-byte boundary. */
55 #define WSBM_USER_ALIGN_SYSMEM(_val) \
56 ((void *)(((unsigned long) (_val) + 15) & ~15))
/*
 * Per-buffer state for the user-space pool.  Embeds the generic buffer
 * storage ("buf") and the kernel buffer descriptor ("kBuf") that carries
 * placement, gpuOffset and fence_type_mask.
 * NOTE(review): several members referenced elsewhere in this file
 * (map, sysmem, size, unFenced) are not visible in this view of the
 * struct — lines appear elided; confirm against the full source.
 */
58 struct _WsbmUserBuffer
60 struct _WsbmBufStorage buf;
61 struct _WsbmKernelBuf kBuf;
63 /* Protected by the pool mutex */
65 struct _WsbmListHead lru; /* Position on the placement's eviction (LRU) list. */
66 struct _WsbmListHead delayed; /* Link on the pool's delayed-destruction list. */
68 /* Protected by the buffer mutex */
71 unsigned long alignment; /* Requested allocation alignment. */
73 struct _WsbmCond event; /* Broadcast on fencing / unvalidate / writer release. */
74 uint32_t proposedPlacement; /* Placement mask requested for the next validation. */
75 uint32_t newFenceType; /* Fence type latched at validate, applied at fence time. */
80 struct _WsbmFenceObject *fence; /* Last attached fence; NULL when idle. */
81 struct _WsbmMMNode *node; /* Managed-memory block for VRAM/AGP placements. */
83 struct _WsbmAtomic writers; /* CPU-access (syncForCpu) reference count. */
/*
 * The user pool proper.
 * NOTE(review): the struct's opening line and some members (e.g. the
 * agpMap CPU address used by pool_create) are elided in this view —
 * confirm against the full source.
 */
89 * Constant after initialization.
92 struct _WsbmBufferPool pool; /* Embedded generic pool interface. */
93 unsigned long agpOffset; /* GPU offset base of the AGP region. */
95 unsigned long agpSize;
96 unsigned long vramOffset; /* GPU offset base of the VRAM region. */
97 unsigned long vramMap; /* CPU mapping base of VRAM, stored as unsigned long. */
98 unsigned long vramSize;
99 struct _WsbmMutex mutex; /* Protects the LRUs, delayed list and MM allocators. */
100 struct _WsbmListHead delayed; /* Buffers awaiting fence signal before teardown. */
101 struct _WsbmListHead vramLRU;
102 struct _WsbmListHead agpLRU;
103 struct _WsbmMM vramMM; /* Range allocator for the VRAM region. */
104 struct _WsbmMM agpMM; /* Range allocator for the AGP region. */
105 uint32_t(*fenceTypes) (uint64_t); /* Maps validation flags to a fence type mask. */
/* Return the _WsbmUserPool owning this buffer (containerOf on the embedded
 * pool member of the generic storage). */
108 static inline struct _WsbmUserPool *
109 userPool(struct _WsbmUserBuffer *buf)
111 return containerOf(buf->buf.pool, struct _WsbmUserPool, pool);
/* Downcast generic buffer storage to this pool's buffer type. */
114 static inline struct _WsbmUserBuffer *
115 userBuf(struct _WsbmBufStorage *buf)
117 return containerOf(buf, struct _WsbmUserBuffer, buf);
/*
 * Wait until the buffer is idle: no longer on an unfenced validation
 * list and with no unsignaled fence attached.  Must be entered with
 * buf->mutex held (WSBM_COND_WAIT uses it).  The mutex is dropped
 * around the blocking wsbmFenceFinish() call and re-taken, so the
 * fence pointer is re-checked after relocking.
 * NOTE(review): several lines (braces, the lazy-wait branch, the
 * wsbmFenceFinish lazy argument) are elided in this view.
 */
121 waitIdleLocked(struct _WsbmBufStorage *buf, int lazy)
123 struct _WsbmUserBuffer *vBuf = userBuf(buf);
125 while (vBuf->unFenced || vBuf->fence != NULL) {
127 WSBM_COND_WAIT(&vBuf->event, &buf->mutex);
129 if (vBuf->fence != NULL) {
130 if (!wsbmFenceSignaled(vBuf->fence, vBuf->kBuf.fence_type_mask)) {
131 struct _WsbmFenceObject *fence =
132 wsbmFenceReference(vBuf->fence);
/* Take a private fence reference, then drop the mutex while blocking
 * so other threads can make progress. */
134 WSBM_MUTEX_UNLOCK(&buf->mutex);
135 (void)wsbmFenceFinish(fence, vBuf->kBuf.fence_type_mask,
137 WSBM_MUTEX_LOCK(&buf->mutex);
/* Only clear the buffer's fence if nobody replaced it while unlocked. */
139 if (vBuf->fence == fence)
140 wsbmFenceUnreference(&vBuf->fence);
142 wsbmFenceUnreference(&fence);
144 wsbmFenceUnreference(&vBuf->fence);
151 pool_waitIdle(struct _WsbmBufStorage *buf, int lazy)
153 WSBM_MUTEX_UNLOCK(&buf->mutex);
154 waitIdleLocked(buf, lazy);
155 WSBM_MUTEX_UNLOCK(&buf->mutex);
/*
 * Evict the least-recently-used buffer on 'lru' back to system memory:
 * wait for its fence, copy the contents into freshly malloc()ed,
 * 16-byte-aligned system memory, and retarget map/placement.  Entered
 * with the pool mutex held; it is dropped and re-taken so the buffer
 * mutex can be acquired first (lock order: buffer, then pool).
 * Callers treat a return of 0 as "one buffer evicted".
 * NOTE(review): the empty-list check, error paths and the freeing of
 * the managed-memory block (see the FIXME) are elided in this view.
 */
161 evict_lru(struct _WsbmListHead *lru)
163 struct _WsbmUserBuffer *vBuf;
164 struct _WsbmUserPool *p;
165 struct _WsbmListHead *list = lru->next;
172 vBuf = WSBMLISTENTRY(list, struct _WsbmUserBuffer, lru);
/* Re-acquire in buffer-then-pool order to respect lock ordering. */
174 WSBM_MUTEX_UNLOCK(&p->mutex);
175 WSBM_MUTEX_LOCK(&vBuf->buf.mutex);
176 WSBM_MUTEX_LOCK(&p->mutex);
/* Over-allocate so the copy destination can be 16-byte aligned. */
178 vBuf->sysmem = malloc(vBuf->size + WSBM_USER_ALIGN_ADD);
/* Synchronously wait for any outstanding fence before reading the data. */
185 (void)wsbmFenceFinish(vBuf->fence, vBuf->kBuf.fence_type_mask, 0);
186 wsbmFenceUnreference(&vBuf->fence);
188 memcpy(WSBM_USER_ALIGN_SYSMEM(vBuf->sysmem), vBuf->map, vBuf->size);
189 WSBMLISTDELINIT(&vBuf->lru);
190 vBuf->kBuf.placement = WSBM_PL_FLAG_SYSTEM;
191 vBuf->map = WSBM_USER_ALIGN_SYSMEM(vBuf->sysmem);
194 * FIXME: Free memory.
199 WSBM_MUTEX_UNLOCK(&vBuf->buf.mutex);
/*
 * Create a buffer, trying placements in order: AGP (WSBM_PL_FLAG_TT),
 * then VRAM, then malloc()ed system memory — except that a NO_EVICT
 * buffer may not silently fall back to system memory.
 * NOTE(review): allocation-failure branches, goto targets and the
 * final return are elided in this view.
 */
203 static struct _WsbmBufStorage *
204 pool_create(struct _WsbmBufferPool *pool,
205 unsigned long size, uint32_t placement, unsigned alignment)
207 struct _WsbmUserPool *p = containerOf(pool, struct _WsbmUserPool, pool);
208 struct _WsbmUserBuffer *vBuf = calloc(1, sizeof(*vBuf));
213 wsbmBufStorageInit(&vBuf->buf, pool);
215 vBuf->proposedPlacement = placement;
217 vBuf->alignment = alignment;
219 WSBMINITLISTHEAD(&vBuf->lru);
220 WSBMINITLISTHEAD(&vBuf->delayed);
221 WSBM_MUTEX_LOCK(&p->mutex);
/* First choice: carve a block out of the AGP range allocator. */
223 if (placement & WSBM_PL_FLAG_TT) {
224 vBuf->node = wsbmMMSearchFree(&p->agpMM, size, alignment, 1);
226 vBuf->node = wsbmMMGetBlock(vBuf->node, size, alignment);
229 vBuf->kBuf.placement = WSBM_PL_FLAG_TT;
230 vBuf->kBuf.gpuOffset = p->agpOffset + vBuf->node->start;
231 vBuf->map = (void *)(p->agpMap + vBuf->node->start);
232 WSBMLISTADDTAIL(&vBuf->lru, &p->agpLRU);
/* Second choice: a block from the VRAM range allocator. */
237 if (placement & WSBM_PL_FLAG_VRAM) {
238 vBuf->node = wsbmMMSearchFree(&p->vramMM, size, alignment, 1);
240 vBuf->node = wsbmMMGetBlock(vBuf->node, size, alignment);
243 vBuf->kBuf.placement = WSBM_PL_FLAG_VRAM;
244 vBuf->kBuf.gpuOffset = p->vramOffset + vBuf->node->start;
245 vBuf->map = (void *)(p->vramMap + vBuf->node->start);
246 WSBMLISTADDTAIL(&vBuf->lru, &p->vramLRU);
/* A NO_EVICT buffer that cannot live in system memory must fail here
 * rather than fall back below. */
251 if ((placement & WSBM_PL_FLAG_NO_EVICT)
252 && !(placement & WSBM_PL_FLAG_SYSTEM)) {
253 WSBM_MUTEX_UNLOCK(&p->mutex);
/* Fallback: plain system memory, over-allocated for 16-byte alignment. */
257 vBuf->sysmem = malloc(size + WSBM_USER_ALIGN_ADD);
258 vBuf->kBuf.placement = WSBM_PL_FLAG_SYSTEM;
259 vBuf->map = WSBM_USER_ALIGN_SYSMEM(vBuf->sysmem);
263 WSBM_MUTEX_UNLOCK(&p->mutex);
264 if (vBuf->sysmem != NULL
265 || (!(vBuf->kBuf.placement & WSBM_PL_FLAG_SYSTEM)))
/*
 * Prepare the buffer for GPU use.  Waits for CPU writers to drain,
 * takes the buffer off its LRU, folds set/clr flags into the proposed
 * placement and, when the current region is no longer acceptable,
 * evicts to system memory and software-copies into newly allocated AGP
 * or VRAM space — evicting other LRU buffers until an allocation
 * succeeds.  Finally latches the fence type for the upcoming fence.
 * NOTE(review): the clr_flags parameter line, return statements and
 * several branch/loop heads are elided in this view.
 */
273 pool_validate(struct _WsbmBufStorage *buf, uint64_t set_flags,
276 struct _WsbmUserBuffer *vBuf = userBuf(buf);
277 struct _WsbmUserPool *p = userPool(vBuf);
280 WSBM_MUTEX_LOCK(&buf->mutex);
/* Wait for CPU access (syncForCpu writers) to be released. */
282 while (wsbmAtomicRead(&vBuf->writers) != 0)
283 WSBM_COND_WAIT(&vBuf->event, &buf->mutex);
287 WSBM_MUTEX_LOCK(&p->mutex);
288 WSBMLISTDELINIT(&vBuf->lru);
290 vBuf->proposedPlacement =
291 (vBuf->proposedPlacement | set_flags) & ~clr_flags;
/* Current memory region is still acceptable: nothing to move. */
293 if ((vBuf->proposedPlacement & vBuf->kBuf.placement & WSBM_PL_MASK_MEM) ==
294 vBuf->kBuf.placement) {
300 * We're moving to another memory region, so evict first and we'll
301 * do a sw copy to the other region.
304 if (!(vBuf->kBuf.placement & WSBM_PL_FLAG_SYSTEM)) {
305 struct _WsbmListHead tmpLRU;
307 WSBMINITLISTHEAD(&tmpLRU);
308 WSBMLISTADDTAIL(&tmpLRU, &vBuf->lru);
/* Evict ourselves via a private one-entry LRU list. */
309 err = evict_lru(&tmpLRU);
/* Try AGP, evicting other LRU buffers until the allocation fits. */
314 if (vBuf->proposedPlacement & WSBM_PL_FLAG_TT) {
317 wsbmMMSearchFree(&p->agpMM, vBuf->size, vBuf->alignment, 1);
320 wsbmMMGetBlock(vBuf->node, vBuf->size, vBuf->alignment);
323 vBuf->kBuf.placement = WSBM_PL_FLAG_TT;
324 vBuf->kBuf.gpuOffset = p->agpOffset + vBuf->node->start;
325 vBuf->map = (void *)(p->agpMap + vBuf->node->start);
326 memcpy(vBuf->map, WSBM_USER_ALIGN_SYSMEM(vBuf->sysmem),
331 } while (evict_lru(&p->agpLRU) == 0);
/* Then VRAM, with the same evict-and-retry loop. */
334 if (vBuf->proposedPlacement & WSBM_PL_FLAG_VRAM) {
337 wsbmMMSearchFree(&p->vramMM, vBuf->size, vBuf->alignment, 1);
340 wsbmMMGetBlock(vBuf->node, vBuf->size, vBuf->alignment);
343 vBuf->kBuf.placement = WSBM_PL_FLAG_VRAM;
344 vBuf->kBuf.gpuOffset = p->vramOffset + vBuf->node->start;
345 vBuf->map = (void *)(p->vramMap + vBuf->node->start);
346 memcpy(vBuf->map, WSBM_USER_ALIGN_SYSMEM(vBuf->sysmem),
351 } while (evict_lru(&p->vramLRU) == 0);
354 if (vBuf->proposedPlacement & WSBM_PL_FLAG_SYSTEM)
/* Remember the fence type this validation implies; applied in pool_fence(). */
360 vBuf->newFenceType = p->fenceTypes(set_flags);
361 WSBM_MUTEX_UNLOCK(&p->mutex);
362 WSBM_MUTEX_UNLOCK(&buf->mutex);
/*
 * setStatus: placement changes are routed through pool_validate(),
 * which performs any required move.
 * NOTE(review): the return type, any unfenced handling and the return
 * statement are elided in this view.
 */
367 pool_setStatus(struct _WsbmBufStorage *buf,
368 uint32_t set_placement, uint32_t clr_placement)
370 struct _WsbmUserBuffer *vBuf = userBuf(buf);
373 ret = pool_validate(buf, set_placement, clr_placement);
/*
 * Free every buffer on the pool's delayed-destruction list whose fence
 * has signaled (or that carries no fence): drop the fence, unlink it
 * from the delayed and LRU lists, and return its managed-memory block
 * for non-system placements.
 * NOTE(review): the head of the fence-check condition and the final
 * free of the buffer itself are elided in this view.
 */
379 release_delayed_buffers(struct _WsbmUserPool *p)
381 struct _WsbmUserBuffer *vBuf;
382 struct _WsbmListHead *list, *next;
384 WSBM_MUTEX_LOCK(&p->mutex);
387 * We don't need to take the buffer mutexes in this loop, since
388 * the only other user is the evict_lru function, which has the
389 * pool mutex held when accessing the buffer fence member.
392 WSBMLISTFOREACHSAFE(list, next, &p->delayed) {
393 vBuf = WSBMLISTENTRY(list, struct _WsbmUserBuffer, delayed);
396 || wsbmFenceSignaled(vBuf->fence, vBuf->kBuf.fence_type_mask)) {
398 wsbmFenceUnreference(&vBuf->fence);
400 WSBMLISTDEL(&vBuf->delayed);
401 WSBMLISTDEL(&vBuf->lru);
403 if ((vBuf->kBuf.placement & WSBM_PL_FLAG_SYSTEM) == 0)
404 wsbmMMPutBlock(vBuf->node);
413 WSBM_MUTEX_UNLOCK(&p->mutex);
/*
 * Buffer destructor.  A buffer still guarded by an unsignaled fence is
 * parked on the pool's delayed list (freed later by
 * release_delayed_buffers()); otherwise it is torn down now: fence
 * dropped, unlinked from its LRU and, for non-system placements, its
 * managed-memory block returned.
 * NOTE(review): the head of the fence condition and the final
 * free/cleanup lines are elided in this view.
 */
417 pool_destroy(struct _WsbmBufStorage **buf)
419 struct _WsbmUserBuffer *vBuf = userBuf(*buf);
420 struct _WsbmUserPool *p = userPool(vBuf);
424 WSBM_MUTEX_LOCK(&vBuf->buf.mutex);
426 && !wsbmFenceSignaled(vBuf->fence, vBuf->kBuf.fence_type_mask))) {
/* Defer destruction until the fence signals. */
427 WSBM_MUTEX_LOCK(&p->mutex);
428 WSBMLISTADDTAIL(&vBuf->delayed, &p->delayed);
429 WSBM_MUTEX_UNLOCK(&p->mutex);
430 WSBM_MUTEX_UNLOCK(&vBuf->buf.mutex);
435 wsbmFenceUnreference(&vBuf->fence);
437 WSBM_MUTEX_LOCK(&p->mutex);
438 WSBMLISTDEL(&vBuf->lru);
439 WSBM_MUTEX_UNLOCK(&p->mutex);
441 if (!(vBuf->kBuf.placement & WSBM_PL_FLAG_SYSTEM))
442 wsbmMMPutBlock(vBuf->node);
/* Map: hand back the persistent CPU mapping established at create /
 * validate time; no per-map bookkeeping is visible here. */
451 pool_map(struct _WsbmBufStorage *buf, unsigned mode, void **virtual)
453 struct _WsbmUserBuffer *vBuf = userBuf(buf);
455 *virtual = vBuf->map;
/* Unmap counterpart of pool_map; body elided in this view — presumably a
 * no-op since the CPU mapping is persistent. TODO confirm against full source. */
460 pool_unmap(struct _WsbmBufStorage *buf)
/*
 * Release CPU access taken by pool_syncForCpu(): decrement the writer
 * count and wake waiters (e.g. pool_validate) when it reaches zero.
 */
466 pool_releaseFromCpu(struct _WsbmBufStorage *buf, unsigned mode)
468 struct _WsbmUserBuffer *vBuf = userBuf(buf);
470 if (wsbmAtomicDecZero(&vBuf->writers))
471 WSBM_COND_BROADCAST(&vBuf->event);
/*
 * Acquire CPU access.  With WSBM_SYNCCPU_DONT_BLOCK the call must not
 * wait: it fails while the buffer is on an unfenced list, and only
 * takes the writer reference if the fence is absent or has signaled.
 * Otherwise it blocks until the buffer is idle.  Balanced by
 * pool_releaseFromCpu().
 * NOTE(review): the error-return lines of the non-blocking path are
 * elided in this view.
 */
476 pool_syncForCpu(struct _WsbmBufStorage *buf, unsigned mode)
478 struct _WsbmUserBuffer *vBuf = userBuf(buf);
481 WSBM_MUTEX_LOCK(&buf->mutex);
482 if ((mode & WSBM_SYNCCPU_DONT_BLOCK)) {
484 if (vBuf->unFenced) {
490 if ((vBuf->fence == NULL) ||
491 wsbmFenceSignaled(vBuf->fence, vBuf->kBuf.fence_type_mask)) {
492 wsbmFenceUnreference(&vBuf->fence);
493 wsbmAtomicInc(&vBuf->writers);
499 waitIdleLocked(buf, 0);
500 wsbmAtomicInc(&vBuf->writers);
502 WSBM_MUTEX_UNLOCK(&buf->mutex);
/* GPU offset of the buffer, as assigned at create/validate time. */
507 pool_offset(struct _WsbmBufStorage *buf)
509 return userBuf(buf)->kBuf.gpuOffset;
/* Offset of the buffer within the pool; body elided in this view —
 * TODO confirm against the full source. */
513 pool_poolOffset(struct _WsbmBufStorage *buf)
/* Size of the buffer in bytes. */
519 pool_size(struct _WsbmBufStorage *buf)
521 return userBuf(buf)->size;
/*
 * Attach the fence emitted for the last command submission: replace any
 * previous fence, apply the fence type latched at validation
 * (newFenceType), wake idle-waiters, and put the buffer back on the LRU
 * list matching its placement so it becomes evictable again.
 */
525 pool_fence(struct _WsbmBufStorage *buf, struct _WsbmFenceObject *fence)
527 struct _WsbmUserBuffer *vBuf = userBuf(buf);
528 struct _WsbmUserPool *p = userPool(vBuf);
530 WSBM_MUTEX_LOCK(&buf->mutex);
533 wsbmFenceUnreference(&vBuf->fence);
535 vBuf->fence = wsbmFenceReference(fence);
537 vBuf->kBuf.fence_type_mask = vBuf->newFenceType;
539 WSBM_COND_BROADCAST(&vBuf->event);
540 WSBM_MUTEX_LOCK(&p->mutex);
541 if (vBuf->kBuf.placement & WSBM_PL_FLAG_VRAM)
542 WSBMLISTADDTAIL(&vBuf->lru, &p->vramLRU);
543 else if (vBuf->kBuf.placement & WSBM_PL_FLAG_TT)
544 WSBMLISTADDTAIL(&vBuf->lru, &p->agpLRU);
545 WSBM_MUTEX_UNLOCK(&p->mutex);
546 WSBM_MUTEX_UNLOCK(&buf->mutex);
/*
 * Back out of a validation that will not be fenced: wake waiters and
 * return the buffer to the LRU list of its current placement.
 * NOTE(review): the unFenced bookkeeping lines are elided in this view.
 */
550 pool_unvalidate(struct _WsbmBufStorage *buf)
552 struct _WsbmUserBuffer *vBuf = userBuf(buf);
553 struct _WsbmUserPool *p = userPool(vBuf);
555 WSBM_MUTEX_LOCK(&buf->mutex);
561 WSBM_COND_BROADCAST(&vBuf->event);
562 WSBM_MUTEX_LOCK(&p->mutex);
563 if (vBuf->kBuf.placement & WSBM_PL_FLAG_VRAM)
564 WSBMLISTADDTAIL(&vBuf->lru, &p->vramLRU);
565 else if (vBuf->kBuf.placement & WSBM_PL_FLAG_TT)
566 WSBMLISTADDTAIL(&vBuf->lru, &p->agpLRU);
567 WSBM_MUTEX_UNLOCK(&p->mutex);
571 WSBM_MUTEX_UNLOCK(&buf->mutex);
/* Expose the kernel buffer descriptor (placement, gpuOffset, fence type)
 * for validation code; the return statement is elided in this view. */
574 static struct _WsbmKernelBuf *
575 pool_kernel(struct _WsbmBufStorage *buf)
577 struct _WsbmUserBuffer *vBuf = userBuf(buf);
/*
 * Pool destructor: flush the delayed-destruction list until it is
 * empty, evict everything remaining on both LRU lists, then tear down
 * the AGP and VRAM range allocators.
 * NOTE(review): the retry-loop head around release_delayed_buffers and
 * the final free of the pool struct are elided in this view.
 */
583 pool_takedown(struct _WsbmBufferPool *pool)
585 struct _WsbmUserPool *p = containerOf(pool, struct _WsbmUserPool, pool);
589 release_delayed_buffers(p);
590 WSBM_MUTEX_LOCK(&p->mutex);
591 empty = (p->delayed.next == &p->delayed);
592 WSBM_MUTEX_UNLOCK(&p->mutex);
598 WSBM_MUTEX_LOCK(&p->mutex);
/* Evict until both LRUs report nothing left to evict. */
600 while (evict_lru(&p->vramLRU) == 0) ;
601 while (evict_lru(&p->agpLRU) == 0) ;
603 WSBM_MUTEX_UNLOCK(&p->mutex);
605 wsbmMMtakedown(&p->agpMM);
606 wsbmMMtakedown(&p->vramMM);
/*
 * Force-evict all buffers from the pool's VRAM and/or AGP LRU lists.
 * NOTE(review): the cleanVram/cleanAgp guard lines appear elided here —
 * presumably each while-loop is conditional on its flag; confirm
 * against the full source.
 */
612 wsbmUserPoolClean(struct _WsbmBufferPool *pool, int cleanVram, int cleanAgp)
614 struct _WsbmUserPool *p = containerOf(pool, struct _WsbmUserPool, pool);
616 WSBM_MUTEX_LOCK(&p->mutex);
618 while (evict_lru(&p->vramLRU) == 0) ;
620 while (evict_lru(&p->agpLRU) == 0) ;
621 WSBM_MUTEX_UNLOCK(&p->mutex);
624 struct _WsbmBufferPool *
625 wsbmUserPoolInit(void *vramAddr,
626 unsigned long vramStart, unsigned long vramSize,
627 void *agpAddr, unsigned long agpStart,
628 unsigned long agpSize,
629 uint32_t(*fenceTypes) (uint64_t set_flags))
631 struct _WsbmBufferPool *pool;
632 struct _WsbmUserPool *uPool;
635 uPool = calloc(1, sizeof(*uPool));
639 ret = WSBM_MUTEX_INIT(&uPool->mutex);
643 ret = wsbmMMinit(&uPool->vramMM, 0, vramSize);
647 ret = wsbmMMinit(&uPool->agpMM, 0, agpSize);
651 WSBMINITLISTHEAD(&uPool->delayed);
652 WSBMINITLISTHEAD(&uPool->vramLRU);
653 WSBMINITLISTHEAD(&uPool->agpLRU);
655 uPool->agpOffset = agpStart;
656 uPool->agpMap = (unsigned long)agpAddr;
657 uPool->vramOffset = vramStart;
658 uPool->vramMap = (unsigned long)vramAddr;
659 uPool->fenceTypes = fenceTypes;
662 pool->map = &pool_map;
663 pool->unmap = &pool_unmap;
664 pool->destroy = &pool_destroy;
665 pool->offset = &pool_offset;
666 pool->poolOffset = &pool_poolOffset;
667 pool->size = &pool_size;
668 pool->create = &pool_create;
669 pool->fence = &pool_fence;
670 pool->unvalidate = &pool_unvalidate;
671 pool->kernel = &pool_kernel;
672 pool->validate = &pool_validate;
673 pool->waitIdle = &pool_waitIdle;
674 pool->takeDown = &pool_takedown;
675 pool->setStatus = &pool_setStatus;
676 pool->syncforcpu = &pool_syncForCpu;
677 pool->releasefromcpu = &pool_releaseFromCpu;
682 wsbmMMtakedown(&uPool->vramMM);
684 WSBM_MUTEX_FREE(&uPool->mutex);