1 /**************************************************************************
3 * Copyright © 2007 Red Hat Inc.
4 * Copyright © 2007 Intel Corporation
5 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions
29 **************************************************************************/
31 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
32 * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
33 * Eric Anholt <eric@anholt.net>
34 * Dave Airlie <airlied@linux.ie>
49 #include <sys/ioctl.h>
52 #include <sys/types.h>
55 #include "libdrm_lists.h"
56 #include "intel_atomic.h"
57 #include "intel_bufmgr.h"
58 #include "intel_bufmgr_priv.h"
59 #include "intel_chipset.h"
64 #define DBG(...) do { \
65 if (bufmgr_gem->bufmgr.debug) \
66 fprintf(stderr, __VA_ARGS__); \
69 typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
71 struct drm_intel_gem_bo_bucket {
76 /* Only cache objects up to 64MB. Bigger than that, and the rounding of the
77 * size makes many operations fail that wouldn't otherwise.
79 #define DRM_INTEL_GEM_BO_BUCKETS 14
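/*
 * Bucket i caches BOs of size (4096 << i) bytes; the buckets are initialized
 * in drm_intel_bufmgr_gem_init() below.
 */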
80 typedef struct _drm_intel_bufmgr_gem {
81 drm_intel_bufmgr bufmgr;
89 struct drm_i915_gem_exec_object *exec_objects;
90 drm_intel_bo **exec_bos;
94 /** Array of lists of cached gem objects of power-of-two sizes */
95 struct drm_intel_gem_bo_bucket cache_bucket[DRM_INTEL_GEM_BO_BUCKETS];
101 } drm_intel_bufmgr_gem;
103 struct _drm_intel_bo_gem {
111 * Kernel-assigned global name for this object
113 unsigned int global_name;
116 * Index of the buffer within the validation list while preparing a
117 * batchbuffer execution.
122 * Current tiling mode
124 uint32_t tiling_mode;
125 uint32_t swizzle_mode;
129 /** Array passed to the DRM containing relocation information. */
130 struct drm_i915_gem_relocation_entry *relocs;
131 /** Array of bos corresponding to relocs[i].target_handle */
132 drm_intel_bo **reloc_target_bo;
133 /** Number of entries in relocs */
135 /** Mapped address for the buffer, saved across map/unmap cycles */
137 /** GTT virtual address for the buffer, saved across map/unmap cycles */
144 * Boolean of whether this BO and its children have been included in
145 * the current drm_intel_bufmgr_check_aperture_space() total.
147 char included_in_check_aperture;
150 * Boolean of whether this buffer has been used as a relocation
151 * target and had its size accounted for, and thus can't have any
152 * further relocations added to it.
154 char used_as_reloc_target;
157 * Boolean of whether this buffer can be re-used
162 * Size in bytes of this buffer and its relocation descendants.
164 * Used to avoid costly tree walking in
165 * drm_intel_bufmgr_check_aperture_space() in the common case.
170 * Number of potential fence registers required by this buffer and its relocation tree.
173 int reloc_tree_fences;
177 drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);
180 drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);
183 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
184 uint32_t * swizzle_mode);
187 drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
190 static void drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo);
192 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);
194 static void drm_intel_gem_bo_free(drm_intel_bo *bo);
197 drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
198 uint32_t *tiling_mode)
200 unsigned long min_size, max_size;
203 if (*tiling_mode == I915_TILING_NONE)
206 /* 965+ just need multiples of page size for tiling */
207 if (IS_I965G(bufmgr_gem))
208 return ROUND_UP_TO(size, 4096);
210 /* Older chips need powers of two, of at least 512k or 1M */
211 if (IS_I9XX(bufmgr_gem)) {
212 min_size = 1024*1024;
213 max_size = 128*1024*1024;
216 max_size = 64*1024*1024;
219 if (size > max_size) {
220 *tiling_mode = I915_TILING_NONE;
224 for (i = min_size; i < size; i <<= 1)
231 * Round a given pitch up to the minimum required for X tiling on a
232 * given chip. We use 512 as the minimum to allow for a later tiling change.
236 drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
237 unsigned long pitch, uint32_t tiling_mode)
239 unsigned long tile_width = 512;
242 if (tiling_mode == I915_TILING_NONE)
243 return ROUND_UP_TO(pitch, tile_width);
245 /* 965 is flexible */
246 if (IS_I965G(bufmgr_gem))
247 return ROUND_UP_TO(pitch, tile_width);
249 /* Pre-965 needs power of two tile width */
250 for (i = tile_width; i < pitch; i <<= 1)
256 static struct drm_intel_gem_bo_bucket *
257 drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
262 for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
263 struct drm_intel_gem_bo_bucket *bucket =
264 &bufmgr_gem->cache_bucket[i];
265 if (bucket->size >= size) {
274 drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
278 for (i = 0; i < bufmgr_gem->exec_count; i++) {
279 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
280 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
282 if (bo_gem->relocs == NULL) {
283 DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
288 for (j = 0; j < bo_gem->reloc_count; j++) {
289 drm_intel_bo *target_bo = bo_gem->reloc_target_bo[j];
290 drm_intel_bo_gem *target_gem =
291 (drm_intel_bo_gem *) target_bo;
293 DBG("%2d: %d (%s)@0x%08llx -> "
294 "%d (%s)@0x%08lx + 0x%08x\n",
296 bo_gem->gem_handle, bo_gem->name,
297 (unsigned long long)bo_gem->relocs[j].offset,
298 target_gem->gem_handle,
301 bo_gem->relocs[j].delta);
307 drm_intel_gem_bo_reference(drm_intel_bo *bo)
309 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
311 assert(atomic_read(&bo_gem->refcount) > 0);
312 atomic_inc(&bo_gem->refcount);
316 * Adds the given buffer to the list of buffers to be validated (moved into the
317 * appropriate memory type) with the next batch submission.
319 * If a buffer is validated multiple times in a batch submission, it ends up
320 * with the intersection of the memory type flags and the union of the
324 drm_intel_add_validate_buffer(drm_intel_bo *bo)
326 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
327 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
330 if (bo_gem->validate_index != -1)
333 /* Extend the array of validation entries as necessary. */
334 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
335 int new_size = bufmgr_gem->exec_size * 2;
340 bufmgr_gem->exec_objects =
341 realloc(bufmgr_gem->exec_objects,
342 sizeof(*bufmgr_gem->exec_objects) * new_size);
343 bufmgr_gem->exec_bos =
344 realloc(bufmgr_gem->exec_bos,
345 sizeof(*bufmgr_gem->exec_bos) * new_size);
346 bufmgr_gem->exec_size = new_size;
349 index = bufmgr_gem->exec_count;
350 bo_gem->validate_index = index;
351 /* Fill in array entry */
352 bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
353 bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
354 bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
355 bufmgr_gem->exec_objects[index].alignment = 0;
356 bufmgr_gem->exec_objects[index].offset = 0;
357 bufmgr_gem->exec_bos[index] = bo;
358 drm_intel_gem_bo_reference(bo);
359 bufmgr_gem->exec_count++;
362 #define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
366 drm_intel_setup_reloc_list(drm_intel_bo *bo)
368 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
369 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
370 unsigned int max_relocs = bufmgr_gem->max_relocs;
372 if (bo->size / 4 < max_relocs)
373 max_relocs = bo->size / 4;
375 bo_gem->relocs = malloc(max_relocs *
376 sizeof(struct drm_i915_gem_relocation_entry));
377 bo_gem->reloc_target_bo = malloc(max_relocs * sizeof(drm_intel_bo *));
383 drm_intel_gem_bo_busy(drm_intel_bo *bo)
385 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
386 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
387 struct drm_i915_gem_busy busy;
390 memset(&busy, 0, sizeof(busy));
391 busy.handle = bo_gem->gem_handle;
393 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
395 return (ret == 0 && busy.busy);
399 drm_intel_gem_bo_madvise(drm_intel_bufmgr_gem *bufmgr_gem,
400 drm_intel_bo_gem *bo_gem, int state)
402 struct drm_i915_gem_madvise madv;
404 madv.handle = bo_gem->gem_handle;
407 ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
409 return madv.retained;
412 /* drop the oldest entries that have been purged by the kernel */
414 drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
415 struct drm_intel_gem_bo_bucket *bucket)
417 while (!DRMLISTEMPTY(&bucket->head)) {
418 drm_intel_bo_gem *bo_gem;
420 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
421 bucket->head.next, head);
422 if (drm_intel_gem_bo_madvise
423 (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
426 DRMLISTDEL(&bo_gem->head);
427 drm_intel_gem_bo_free(&bo_gem->bo);
431 static drm_intel_bo *
432 drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
437 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
438 drm_intel_bo_gem *bo_gem;
439 unsigned int page_size = getpagesize();
441 struct drm_intel_gem_bo_bucket *bucket;
442 int alloc_from_cache;
443 unsigned long bo_size;
446 if (flags & BO_ALLOC_FOR_RENDER)
449 /* Round the allocated size up to a power of two number of pages. */
450 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
452 /* If we don't have caching at this size, don't actually round the
455 if (bucket == NULL) {
457 if (bo_size < page_size)
460 bo_size = bucket->size;
463 pthread_mutex_lock(&bufmgr_gem->lock);
464 /* Get a buffer out of the cache if available */
466 alloc_from_cache = 0;
467 if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
469 /* Allocate new render-target BOs from the tail (MRU)
470 * of the list, as it will likely be hot in the GPU
471 * cache and in the aperture for us.
473 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
474 bucket->head.prev, head);
475 DRMLISTDEL(&bo_gem->head);
476 alloc_from_cache = 1;
478 /* For non-render-target BOs (where we're probably
479 * going to map it first thing in order to fill it
480 * with data), check if the last BO in the cache is
481 * unbusy, and only reuse in that case. Otherwise,
482 * allocating a new buffer is probably faster than
483 * waiting for the GPU to finish.
485 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
486 bucket->head.next, head);
487 if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
488 alloc_from_cache = 1;
489 DRMLISTDEL(&bo_gem->head);
493 if (alloc_from_cache) {
494 if (!drm_intel_gem_bo_madvise
495 (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
496 drm_intel_gem_bo_free(&bo_gem->bo);
497 drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
503 pthread_mutex_unlock(&bufmgr_gem->lock);
505 if (!alloc_from_cache) {
506 struct drm_i915_gem_create create;
508 bo_gem = calloc(1, sizeof(*bo_gem));
512 bo_gem->bo.size = bo_size;
513 memset(&create, 0, sizeof(create));
514 create.size = bo_size;
516 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
517 bo_gem->gem_handle = create.handle;
518 bo_gem->bo.handle = bo_gem->gem_handle;
523 bo_gem->bo.bufmgr = bufmgr;
527 atomic_set(&bo_gem->refcount, 1);
528 bo_gem->validate_index = -1;
529 bo_gem->reloc_tree_size = bo_gem->bo.size;
530 bo_gem->reloc_tree_fences = 0;
531 bo_gem->used_as_reloc_target = 0;
532 bo_gem->tiling_mode = I915_TILING_NONE;
533 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
534 bo_gem->reusable = 1;
536 DBG("bo_create: buf %d (%s) %ldb\n",
537 bo_gem->gem_handle, bo_gem->name, size);
542 static drm_intel_bo *
543 drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
546 unsigned int alignment)
548 assert(alignment <= 4096);
549 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
550 BO_ALLOC_FOR_RENDER);
553 static drm_intel_bo *
554 drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
557 unsigned int alignment)
559 assert(alignment <= 4096);
560 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0);
563 static drm_intel_bo *
564 drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
565 int x, int y, int cpp, uint32_t *tiling_mode,
566 unsigned long *pitch, unsigned long flags)
568 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
570 unsigned long size, stride, aligned_y = y;
573 if (*tiling_mode == I915_TILING_NONE)
574 aligned_y = ALIGN(y, 2);
575 else if (*tiling_mode == I915_TILING_X)
576 aligned_y = ALIGN(y, 8);
577 else if (*tiling_mode == I915_TILING_Y)
578 aligned_y = ALIGN(y, 32);
581 stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, *tiling_mode);
582 size = stride * aligned_y;
583 size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
585 bo = drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags);
589 ret = drm_intel_gem_bo_set_tiling(bo, tiling_mode, stride);
591 drm_intel_gem_bo_unreference(bo);
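/*
 * Worked example (hypothetical; assumes the elided stride computation is
 * x * cpp): a 1024x768, cpp = 4, X-tiled surface on a 915-class chip yields
 * aligned_y = ALIGN(768, 8) = 768 and a 4096-byte pitch, which
 * drm_intel_gem_bo_tile_pitch() keeps since it is already a power of two
 * >= 512; the resulting 3 MB linear size is then rounded up by
 * drm_intel_gem_bo_tile_size() to the 4 MB power of two that generation
 * requires for tiling.
 */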
601 * Returns a drm_intel_bo wrapping the given buffer object handle.
603 * This can be used when one application needs to pass a buffer object to another.
607 drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
611 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
612 drm_intel_bo_gem *bo_gem;
614 struct drm_gem_open open_arg;
615 struct drm_i915_gem_get_tiling get_tiling;
617 bo_gem = calloc(1, sizeof(*bo_gem));
621 memset(&open_arg, 0, sizeof(open_arg));
622 open_arg.name = handle;
623 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
625 fprintf(stderr, "Couldn't reference %s handle 0x%08x: %s\n",
626 name, handle, strerror(errno));
630 bo_gem->bo.size = open_arg.size;
631 bo_gem->bo.offset = 0;
632 bo_gem->bo.virtual = NULL;
633 bo_gem->bo.bufmgr = bufmgr;
635 atomic_set(&bo_gem->refcount, 1);
636 bo_gem->validate_index = -1;
637 bo_gem->gem_handle = open_arg.handle;
638 bo_gem->global_name = handle;
639 bo_gem->reusable = 0;
641 memset(&get_tiling, 0, sizeof(get_tiling));
642 get_tiling.handle = bo_gem->gem_handle;
643 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
645 drm_intel_gem_bo_unreference(&bo_gem->bo);
648 bo_gem->tiling_mode = get_tiling.tiling_mode;
649 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
650 if (bo_gem->tiling_mode == I915_TILING_NONE)
651 bo_gem->reloc_tree_fences = 0;
653 bo_gem->reloc_tree_fences = 1;
655 DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
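/*
 * Typical use (hypothetical caller): one process exports a BO with
 * drm_intel_bo_flink() and hands the resulting global name to another
 * process, which then wraps it:
 *
 *	uint32_t name;
 *	drm_intel_bo_flink(bo, &name);
 *	...
 *	shared = drm_intel_bo_gem_create_from_name(bufmgr, "shared", name);
 */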
661 drm_intel_gem_bo_free(drm_intel_bo *bo)
663 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
664 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
665 struct drm_gem_close close;
668 if (bo_gem->mem_virtual)
669 munmap(bo_gem->mem_virtual, bo_gem->bo.size);
670 if (bo_gem->gtt_virtual)
671 munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
673 free(bo_gem->reloc_target_bo);
674 free(bo_gem->relocs);
676 /* Close this object */
677 memset(&close, 0, sizeof(close));
678 close.handle = bo_gem->gem_handle;
679 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
682 "DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
683 bo_gem->gem_handle, bo_gem->name, strerror(errno));
688 /** Frees all cached buffers significantly older than @time. */
690 drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
694 for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
695 struct drm_intel_gem_bo_bucket *bucket =
696 &bufmgr_gem->cache_bucket[i];
698 while (!DRMLISTEMPTY(&bucket->head)) {
699 drm_intel_bo_gem *bo_gem;
701 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
702 bucket->head.next, head);
703 if (time - bo_gem->free_time <= 1)
706 DRMLISTDEL(&bo_gem->head);
708 drm_intel_gem_bo_free(&bo_gem->bo);
713 static void drm_intel_gem_bo_unreference_final(drm_intel_bo *bo)
715 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
716 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
717 struct drm_intel_gem_bo_bucket *bucket;
718 uint32_t tiling_mode;
720 if (bo_gem->relocs != NULL) {
723 /* Unreference all the target buffers */
724 for (i = 0; i < bo_gem->reloc_count; i++)
725 drm_intel_gem_bo_unreference_locked(bo_gem->
729 DBG("bo_unreference final: %d (%s)\n",
730 bo_gem->gem_handle, bo_gem->name);
732 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
733 /* Put the buffer into our internal cache for reuse if we can. */
734 tiling_mode = I915_TILING_NONE;
735 if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
736 drm_intel_gem_bo_set_tiling(bo, &tiling_mode, 0) == 0) {
737 struct timespec time;
739 clock_gettime(CLOCK_MONOTONIC, &time);
740 bo_gem->free_time = time.tv_sec;
743 bo_gem->validate_index = -1;
744 bo_gem->reloc_count = 0;
746 DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
748 drm_intel_gem_bo_madvise(bufmgr_gem, bo_gem,
750 drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
752 drm_intel_gem_bo_free(bo);
756 static void drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo)
758 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
760 assert(atomic_read(&bo_gem->refcount) > 0);
761 if (atomic_dec_and_test(&bo_gem->refcount))
762 drm_intel_gem_bo_unreference_final(bo);
765 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
767 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
769 assert(atomic_read(&bo_gem->refcount) > 0);
770 if (atomic_dec_and_test(&bo_gem->refcount)) {
771 drm_intel_bufmgr_gem *bufmgr_gem =
772 (drm_intel_bufmgr_gem *) bo->bufmgr;
773 pthread_mutex_lock(&bufmgr_gem->lock);
774 drm_intel_gem_bo_unreference_final(bo);
775 pthread_mutex_unlock(&bufmgr_gem->lock);
779 static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
781 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
782 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
783 struct drm_i915_gem_set_domain set_domain;
786 pthread_mutex_lock(&bufmgr_gem->lock);
788 /* Allow recursive mapping. Mesa may recursively map buffers with
789 * nested display loops.
791 if (!bo_gem->mem_virtual) {
792 struct drm_i915_gem_mmap mmap_arg;
794 DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
796 memset(&mmap_arg, 0, sizeof(mmap_arg));
797 mmap_arg.handle = bo_gem->gem_handle;
799 mmap_arg.size = bo->size;
800 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
803 "%s:%d: Error mapping buffer %d (%s): %s .\n",
804 __FILE__, __LINE__, bo_gem->gem_handle,
805 bo_gem->name, strerror(errno));
806 pthread_mutex_unlock(&bufmgr_gem->lock);
809 bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
811 DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
812 bo_gem->mem_virtual);
813 bo->virtual = bo_gem->mem_virtual;
815 set_domain.handle = bo_gem->gem_handle;
816 set_domain.read_domains = I915_GEM_DOMAIN_CPU;
818 set_domain.write_domain = I915_GEM_DOMAIN_CPU;
820 set_domain.write_domain = 0;
822 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
824 } while (ret == -1 && errno == EINTR);
826 fprintf(stderr, "%s:%d: Error setting to CPU domain %d: %s\n",
827 __FILE__, __LINE__, bo_gem->gem_handle,
829 pthread_mutex_unlock(&bufmgr_gem->lock);
833 pthread_mutex_unlock(&bufmgr_gem->lock);
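/*
 * Typical use from a client (hypothetical sketch): map, fill, unmap.
 *
 *	drm_intel_bo_map(bo, 1);
 *	memcpy(bo->virtual, data, data_size);
 *	drm_intel_bo_unmap(bo);
 */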
838 int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
840 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
841 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
842 struct drm_i915_gem_set_domain set_domain;
845 pthread_mutex_lock(&bufmgr_gem->lock);
847 /* Get a mapping of the buffer if we haven't before. */
848 if (bo_gem->gtt_virtual == NULL) {
849 struct drm_i915_gem_mmap_gtt mmap_arg;
851 DBG("bo_map_gtt: mmap %d (%s)\n", bo_gem->gem_handle,
854 memset(&mmap_arg, 0, sizeof(mmap_arg));
855 mmap_arg.handle = bo_gem->gem_handle;
857 /* Get the fake offset back... */
858 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT,
862 "%s:%d: Error preparing buffer map %d (%s): %s .\n",
864 bo_gem->gem_handle, bo_gem->name,
866 pthread_mutex_unlock(&bufmgr_gem->lock);
871 bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
872 MAP_SHARED, bufmgr_gem->fd,
874 if (bo_gem->gtt_virtual == MAP_FAILED) {
876 "%s:%d: Error mapping buffer %d (%s): %s .\n",
878 bo_gem->gem_handle, bo_gem->name,
880 pthread_mutex_unlock(&bufmgr_gem->lock);
885 bo->virtual = bo_gem->gtt_virtual;
887 DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
888 bo_gem->gtt_virtual);
890 /* Now move it to the GTT domain so that the CPU caches are flushed */
891 set_domain.handle = bo_gem->gem_handle;
892 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
893 set_domain.write_domain = I915_GEM_DOMAIN_GTT;
895 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
897 } while (ret == -1 && errno == EINTR);
900 fprintf(stderr, "%s:%d: Error setting domain %d: %s\n",
901 __FILE__, __LINE__, bo_gem->gem_handle,
905 pthread_mutex_unlock(&bufmgr_gem->lock);
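/*
 * Note: unlike drm_intel_gem_bo_map(), which returns a CPU mapping of the
 * backing pages, the GTT mapping goes through the aperture with the fence
 * registers applied, so tiled buffers appear linear (de-tiled) to the CPU,
 * at the cost of uncached, typically write-combined access.
 */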
910 int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
912 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
913 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
919 assert(bo_gem->gtt_virtual != NULL);
921 pthread_mutex_lock(&bufmgr_gem->lock);
923 pthread_mutex_unlock(&bufmgr_gem->lock);
928 static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
930 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
931 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
932 struct drm_i915_gem_sw_finish sw_finish;
938 assert(bo_gem->mem_virtual != NULL);
940 pthread_mutex_lock(&bufmgr_gem->lock);
942 /* Cause a flush to happen if the buffer's pinned for scanout, so the
943 * results show up in a timely manner.
945 sw_finish.handle = bo_gem->gem_handle;
947 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SW_FINISH,
949 } while (ret == -1 && errno == EINTR);
952 pthread_mutex_unlock(&bufmgr_gem->lock);
957 drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
958 unsigned long size, const void *data)
960 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
961 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
962 struct drm_i915_gem_pwrite pwrite;
965 memset(&pwrite, 0, sizeof(pwrite));
966 pwrite.handle = bo_gem->gem_handle;
967 pwrite.offset = offset;
969 pwrite.data_ptr = (uint64_t) (uintptr_t) data;
971 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
972 } while (ret == -1 && errno == EINTR);
975 "%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
976 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
977 (int)size, strerror(errno));
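/*
 * Example (hypothetical caller): a pwrite-based upload avoids mapping the
 * whole buffer just to write a small range:
 *
 *	drm_intel_bo_subdata(bo, 0, sizeof(vertices), vertices);
 */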
983 drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
985 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
986 struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
989 get_pipe_from_crtc_id.crtc_id = crtc_id;
990 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
991 &get_pipe_from_crtc_id);
993 /* We return -1 here to signal that we don't
994 * know which pipe is associated with this crtc.
995 * This lets the caller know that this information
996 * isn't available; using the wrong pipe for
997 * vblank waiting can cause the chipset to lock up
1002 return get_pipe_from_crtc_id.pipe;
1006 drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
1007 unsigned long size, void *data)
1009 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1010 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1011 struct drm_i915_gem_pread pread;
1014 memset(&pread, 0, sizeof(pread));
1015 pread.handle = bo_gem->gem_handle;
1016 pread.offset = offset;
1018 pread.data_ptr = (uint64_t) (uintptr_t) data;
1020 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
1021 } while (ret == -1 && errno == EINTR);
1024 "%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
1025 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1026 (int)size, strerror(errno));
1031 /** Waits for all GPU rendering to the object to have completed. */
1033 drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
1035 drm_intel_gem_bo_start_gtt_access(bo, 0);
1039 * Sets the object to the GTT read and possibly write domain, used by the X
1040 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
1042 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
1043 * can do tiled pixmaps this way.
1046 drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
1048 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1049 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1050 struct drm_i915_gem_set_domain set_domain;
1053 set_domain.handle = bo_gem->gem_handle;
1054 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1055 set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
1057 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
1059 } while (ret == -1 && errno == EINTR);
1062 "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
1063 __FILE__, __LINE__, bo_gem->gem_handle,
1064 set_domain.read_domains, set_domain.write_domain,
1070 drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
1072 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1075 free(bufmgr_gem->exec_objects);
1076 free(bufmgr_gem->exec_bos);
1078 pthread_mutex_destroy(&bufmgr_gem->lock);
1080 /* Free any cached buffer objects we were going to reuse */
1081 for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
1082 struct drm_intel_gem_bo_bucket *bucket =
1083 &bufmgr_gem->cache_bucket[i];
1084 drm_intel_bo_gem *bo_gem;
1086 while (!DRMLISTEMPTY(&bucket->head)) {
1087 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1088 bucket->head.next, head);
1089 DRMLISTDEL(&bo_gem->head);
1091 drm_intel_gem_bo_free(&bo_gem->bo);
1099 * Adds the target buffer to the validation list and adds the relocation
1100 * to the reloc_buffer's relocation list.
1102 * The relocation entry at the given offset must already contain the
1103 * precomputed relocation value, because the kernel will optimize out
1104 * the relocation entry write when the buffer hasn't moved from the
1105 * last known offset in target_bo.
1108 drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1109 drm_intel_bo *target_bo, uint32_t target_offset,
1110 uint32_t read_domains, uint32_t write_domain)
1112 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1113 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1114 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
1116 pthread_mutex_lock(&bufmgr_gem->lock);
1118 /* Create a new relocation list if needed */
1119 if (bo_gem->relocs == NULL)
1120 drm_intel_setup_reloc_list(bo);
1122 /* Check overflow */
1123 assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
1126 assert(offset <= bo->size - 4);
1127 assert((write_domain & (write_domain - 1)) == 0);
1129 /* Make sure that we're not adding a reloc to something whose size has
1130 * already been accounted for.
1132 assert(!bo_gem->used_as_reloc_target);
1133 bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
1134 bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
1136 /* Flag the target to disallow further relocations in it. */
1137 target_bo_gem->used_as_reloc_target = 1;
1139 bo_gem->relocs[bo_gem->reloc_count].offset = offset;
1140 bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
1141 bo_gem->relocs[bo_gem->reloc_count].target_handle =
1142 target_bo_gem->gem_handle;
1143 bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
1144 bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
1145 bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
1147 bo_gem->reloc_target_bo[bo_gem->reloc_count] = target_bo;
1148 drm_intel_gem_bo_reference(target_bo);
1150 bo_gem->reloc_count++;
1152 pthread_mutex_unlock(&bufmgr_gem->lock);
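/*
 * Example (hypothetical batchbuffer code): the caller writes the presumed
 * address into the batch and records a relocation at that offset:
 *
 *	batch_map[idx] = target_bo->offset + delta;
 *	drm_intel_bo_emit_reloc(batch_bo, idx * 4, target_bo, delta,
 *				I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
 */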
1158 * Walk the tree of relocations rooted at BO and accumulate the list of
1159 * validations to be performed and update the relocation buffers with
1160 * index values into the validation list.
1163 drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
1165 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1168 if (bo_gem->relocs == NULL)
1171 for (i = 0; i < bo_gem->reloc_count; i++) {
1172 drm_intel_bo *target_bo = bo_gem->reloc_target_bo[i];
1174 /* Continue walking the tree depth-first. */
1175 drm_intel_gem_bo_process_reloc(target_bo);
1177 /* Add the target to the validate list */
1178 drm_intel_add_validate_buffer(target_bo);
1183 drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
1187 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1188 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1189 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1191 /* Update the buffer offset */
1192 if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
1193 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1194 bo_gem->gem_handle, bo_gem->name, bo->offset,
1195 (unsigned long long)bufmgr_gem->exec_objects[i].
1197 bo->offset = bufmgr_gem->exec_objects[i].offset;
1203 drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
1204 drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
1206 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1207 struct drm_i915_gem_execbuffer execbuf;
1210 pthread_mutex_lock(&bufmgr_gem->lock);
1211 /* Update indices and set up the validate list. */
1212 drm_intel_gem_bo_process_reloc(bo);
1214 /* Add the batch buffer to the validation list. There are no
1215 * relocations pointing to it.
1217 drm_intel_add_validate_buffer(bo);
1219 execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
1220 execbuf.buffer_count = bufmgr_gem->exec_count;
1221 execbuf.batch_start_offset = 0;
1222 execbuf.batch_len = used;
1223 execbuf.cliprects_ptr = (uintptr_t) cliprects;
1224 execbuf.num_cliprects = num_cliprects;
1229 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_EXECBUFFER,
1231 } while (ret != 0 && errno == EAGAIN);
1233 if (ret != 0 && errno == ENOMEM) {
1235 "Execbuffer fails to pin. "
1236 "Estimate: %u. Actual: %u. Available: %u\n",
1237 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
1240 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
1243 (unsigned int)bufmgr_gem->gtt_size);
1245 drm_intel_update_buffer_offsets(bufmgr_gem);
1247 if (bufmgr_gem->bufmgr.debug)
1248 drm_intel_gem_dump_validation_list(bufmgr_gem);
1250 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1251 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1252 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1254 /* Disconnect the buffer from the validate list */
1255 bo_gem->validate_index = -1;
1256 drm_intel_gem_bo_unreference_locked(bo);
1257 bufmgr_gem->exec_bos[i] = NULL;
1259 bufmgr_gem->exec_count = 0;
1260 pthread_mutex_unlock(&bufmgr_gem->lock);
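/*
 * A batchbuffer client would typically finish a batch with something like
 * (hypothetical sketch):
 *
 *	drm_intel_bo_unmap(batch_bo);
 *	drm_intel_bo_exec(batch_bo, used_bytes, NULL, 0, 0);
 */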
1266 drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
1268 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1269 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1270 struct drm_i915_gem_pin pin;
1273 memset(&pin, 0, sizeof(pin));
1274 pin.handle = bo_gem->gem_handle;
1275 pin.alignment = alignment;
1278 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PIN, &pin);
1279 } while (ret == -1 && errno == EINTR);
1284 bo->offset = pin.offset;
1289 drm_intel_gem_bo_unpin(drm_intel_bo *bo)
1291 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1292 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1293 struct drm_i915_gem_unpin unpin;
1296 memset(&unpin, 0, sizeof(unpin));
1297 unpin.handle = bo_gem->gem_handle;
1299 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
1307 drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
1310 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1311 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1312 struct drm_i915_gem_set_tiling set_tiling;
1315 if (bo_gem->global_name == 0 && *tiling_mode == bo_gem->tiling_mode)
1318 /* If we're going from non-tiling to tiling, bump fence count */
1319 if (bo_gem->tiling_mode == I915_TILING_NONE)
1320 bo_gem->reloc_tree_fences++;
1322 memset(&set_tiling, 0, sizeof(set_tiling));
1323 set_tiling.handle = bo_gem->gem_handle;
1324 set_tiling.tiling_mode = *tiling_mode;
1325 set_tiling.stride = stride;
1327 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
1329 *tiling_mode = bo_gem->tiling_mode;
1332 bo_gem->tiling_mode = set_tiling.tiling_mode;
1333 bo_gem->swizzle_mode = set_tiling.swizzle_mode;
1335 /* If we're going from tiling to non-tiling, drop fence count */
1336 if (bo_gem->tiling_mode == I915_TILING_NONE)
1337 bo_gem->reloc_tree_fences--;
1339 *tiling_mode = bo_gem->tiling_mode;
1344 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
1345 uint32_t * swizzle_mode)
1347 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1349 *tiling_mode = bo_gem->tiling_mode;
1350 *swizzle_mode = bo_gem->swizzle_mode;
1355 drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
1357 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1358 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1359 struct drm_gem_flink flink;
1362 if (!bo_gem->global_name) {
1363 memset(&flink, 0, sizeof(flink));
1364 flink.handle = bo_gem->gem_handle;
1366 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
1369 bo_gem->global_name = flink.name;
1370 bo_gem->reusable = 0;
1373 *name = bo_gem->global_name;
1378 * Enables unlimited caching of buffer objects for reuse.
1380 * This is potentially very memory expensive, as the cache at each bucket
1381 * size is only bounded by how many buffers of that size we've managed to have
1382 * in flight at once.
1385 drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
1387 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1389 bufmgr_gem->bo_reuse = 1;
1393 * Return the additional aperture space required by the tree of buffer objects
1397 drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
1399 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1403 if (bo == NULL || bo_gem->included_in_check_aperture)
1407 bo_gem->included_in_check_aperture = 1;
1409 for (i = 0; i < bo_gem->reloc_count; i++)
1411 drm_intel_gem_bo_get_aperture_space(bo_gem->
1412 reloc_target_bo[i]);
1418 * Count the number of buffers in this list that need a fence reg
1420 * If the count is greater than the number of available regs, we'll have
1421 * to ask the caller to resubmit a batch with fewer tiled buffers.
1423 * This function over-counts if the same buffer is used multiple times.
1426 drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
1429 unsigned int total = 0;
1431 for (i = 0; i < count; i++) {
1432 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
1437 total += bo_gem->reloc_tree_fences;
1443 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
1444 * for the next drm_intel_bufmgr_check_aperture_space() call.
1447 drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
1449 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1452 if (bo == NULL || !bo_gem->included_in_check_aperture)
1455 bo_gem->included_in_check_aperture = 0;
1457 for (i = 0; i < bo_gem->reloc_count; i++)
1458 drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
1459 reloc_target_bo[i]);
1463 * Return a conservative estimate for the amount of aperture required
1464 * for a collection of buffers. This may double-count some buffers.
1467 drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
1470 unsigned int total = 0;
1472 for (i = 0; i < count; i++) {
1473 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
1475 total += bo_gem->reloc_tree_size;
1481 * Return the amount of aperture needed for a collection of buffers.
1482 * This avoids double counting any buffers, at the cost of looking
1483 * at every buffer in the set.
1486 drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
1489 unsigned int total = 0;
1491 for (i = 0; i < count; i++) {
1492 total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
1493 /* For the first buffer object in the array, we get an
1494 * accurate count back for its reloc_tree size (since nothing
1495 * had been flagged as being counted yet). We can save that
1496 * value out as a more conservative reloc_tree_size that
1497 * avoids double-counting target buffers. Since the first
1498 * buffer happens to usually be the batch buffer in our
1499 * callers, this can pull us back from doing the tree
1500 * walk on every new batch emit.
1503 drm_intel_bo_gem *bo_gem =
1504 (drm_intel_bo_gem *) bo_array[i];
1505 bo_gem->reloc_tree_size = total;
1509 for (i = 0; i < count; i++)
1510 drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
1515 * Return -1 if the batchbuffer should be flushed before attempting to
1516 * emit rendering referencing the buffers pointed to by bo_array.
1518 * This is required because if we try to emit a batchbuffer with relocations
1519 * to a tree of buffers that won't simultaneously fit in the aperture,
1520 * the rendering will return an error at a point where the software is not
1521 * prepared to recover from it.
1523 * However, we also want to emit the batchbuffer significantly before we reach
1524 * the limit, as a series of batchbuffers each of which references buffers
1525 * covering almost all of the aperture means that at each emit we end up
1526 * waiting to evict a buffer from the last rendering, and we get synchronous
1527 * performance. By emitting smaller batchbuffers, we eat some CPU overhead to
1528 * get better parallelism.
1531 drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
1533 drm_intel_bufmgr_gem *bufmgr_gem =
1534 (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
1535 unsigned int total = 0;
1536 unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
1539 /* Check for fence reg constraints if necessary */
1540 if (bufmgr_gem->available_fences) {
1541 total_fences = drm_intel_gem_total_fences(bo_array, count);
1542 if (total_fences > bufmgr_gem->available_fences)
1546 total = drm_intel_gem_estimate_batch_space(bo_array, count);
1548 if (total > threshold)
1549 total = drm_intel_gem_compute_batch_space(bo_array, count);
1551 if (total > threshold) {
1552 DBG("check_space: overflowed available aperture, "
1554 total / 1024, (int)bufmgr_gem->gtt_size / 1024);
1557 DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024,
1558 (int)bufmgr_gem->gtt_size / 1024);
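/*
 * Callers (hypothetical sketch) check before adding a new set of objects to
 * the batch and flush first if the aperture would overflow:
 *
 *	if (drm_intel_bufmgr_check_aperture_space(bo_array, count) != 0)
 *		flush_batch();
 */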
1564 * Disable buffer reuse for objects which are shared with the kernel
1565 * as scanout buffers
1568 drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
1570 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1572 bo_gem->reusable = 0;
1577 * Return true if target_bo is referenced, directly or indirectly, by the
1578 * relocation tree rooted at bo.
1581 drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
1583 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1586 if (bo == NULL || target_bo == NULL)
1589 for (i = 0; i < bo_gem->reloc_count; i++) {
1590 if (bo_gem->reloc_target_bo[i] == target_bo)
1592 if (drm_intel_gem_bo_references(bo_gem->reloc_target_bo[i],
1601 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
1602 * and manage buffer objects.
1604 * \param fd File descriptor of the opened DRM device.
1607 drm_intel_bufmgr_gem_init(int fd, int batch_size)
1609 drm_intel_bufmgr_gem *bufmgr_gem;
1610 struct drm_i915_gem_get_aperture aperture;
1611 drm_i915_getparam_t gp;
1615 bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
1616 bufmgr_gem->fd = fd;
1618 if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
1623 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
1626 bufmgr_gem->gtt_size = aperture.aper_available_size;
1628 fprintf(stderr, "DRM_IOCTL_I915_GEM_APERTURE failed: %s\n",
1630 bufmgr_gem->gtt_size = 128 * 1024 * 1024;
1631 fprintf(stderr, "Assuming %dkB available aperture size.\n"
1632 "May lead to reduced performance or incorrect "
1634 (int)bufmgr_gem->gtt_size / 1024);
1637 gp.param = I915_PARAM_CHIPSET_ID;
1638 gp.value = &bufmgr_gem->pci_device;
1639 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
1641 fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
1642 fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
1645 if (!IS_I965G(bufmgr_gem)) {
1646 gp.param = I915_PARAM_NUM_FENCES_AVAIL;
1647 gp.value = &bufmgr_gem->available_fences;
1648 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
1650 fprintf(stderr, "get fences failed: %d [%d]\n", ret,
1652 fprintf(stderr, "param: %d, val: %d\n", gp.param,
1654 bufmgr_gem->available_fences = 0;
1658 /* Let's go with one relocation per every 2 dwords (but round down a bit
1659 * since a power of two will mean an extra page allocation for the reloc
1662 * Every 4 was too few for the blender benchmark.
1664 bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
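/*
 * E.g. for a 16 KB batch this gives 16384 / 4 / 2 - 2 = 2046 relocations
 * per buffer.
 */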
1666 bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
1667 bufmgr_gem->bufmgr.bo_alloc_for_render =
1668 drm_intel_gem_bo_alloc_for_render;
1669 bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
1670 bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
1671 bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
1672 bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
1673 bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
1674 bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
1675 bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
1676 bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
1677 bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
1678 bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
1679 bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
1680 bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
1681 bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
1682 bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
1683 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
1684 bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
1685 bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
1686 bufmgr_gem->bufmgr.debug = 0;
1687 bufmgr_gem->bufmgr.check_aperture_space =
1688 drm_intel_gem_check_aperture_space;
1689 bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
1690 bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
1691 drm_intel_gem_get_pipe_from_crtc_id;
1692 bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
1694 /* Initialize the linked lists for BO reuse cache. */
1695 for (i = 0, size = 4096; i < DRM_INTEL_GEM_BO_BUCKETS; i++, size *= 2) {
1696 DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
1697 bufmgr_gem->cache_bucket[i].size = size;
1700 return &bufmgr_gem->bufmgr;
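/*
 * Typical initialization from a client (hypothetical sketch):
 *
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
 *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 *	drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "scratch", 4096, 4096);
 *	...
 *	drm_intel_bo_unreference(bo);
 *	drm_intel_bufmgr_destroy(bufmgr);
 */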