/**************************************************************************
 *
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007 Intel Corporation
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
 *          Eric Anholt <eric@anholt.net>
 *          Dave Airlie <airlied@linux.ie>
 */

#include <xf86atomic.h>
#include <sys/ioctl.h>
#include <sys/types.h>

#include "libdrm_lists.h"
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "intel_chipset.h"

#define VG_CLEAR(s) VG(memset(&s, 0, sizeof(s)))

#define DBG(...) do {                                   \
        if (bufmgr_gem->bufmgr.debug)                   \
                fprintf(stderr, __VA_ARGS__);           \
} while (0)

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

typedef struct _drm_intel_bo_gem drm_intel_bo_gem;

struct drm_intel_gem_bo_bucket {
        drmMMListHead head;
        unsigned long size;
};

typedef struct _drm_intel_bufmgr_gem {
        drm_intel_bufmgr bufmgr;

        struct drm_i915_gem_exec_object *exec_objects;
        struct drm_i915_gem_exec_object2 *exec2_objects;
        drm_intel_bo **exec_bos;

        /** Array of lists of cached gem objects, bucketed by size */
        struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];

        drmMMListHead vma_cache;
        int vma_count, vma_open, vma_max;

        int available_fences;

        unsigned int has_bsd : 1;
        unsigned int has_blt : 1;
        unsigned int has_relaxed_fencing : 1;
        unsigned int has_llc : 1;
        unsigned int bo_reuse : 1;
        unsigned int no_exec : 1;
} drm_intel_bufmgr_gem;

#define DRM_INTEL_RELOC_FENCE (1<<0)

typedef struct _drm_intel_reloc_target_info {
        drm_intel_bo *bo;
        int flags;
} drm_intel_reloc_target;

struct _drm_intel_bo_gem {
        /**
         * Kernel-assigned global name for this object
         */
        unsigned int global_name;
        drmMMListHead name_list;

        /**
         * Index of the buffer within the validation list while preparing a
         * batchbuffer execution.
         */

        /**
         * Current tiling mode
         */
        uint32_t tiling_mode;
        uint32_t swizzle_mode;
        unsigned long stride;

        /** Array passed to the DRM containing relocation information. */
        struct drm_i915_gem_relocation_entry *relocs;
        /**
         * Array of info structs corresponding to relocs[i].target_handle etc.
         */
        drm_intel_reloc_target *reloc_target_info;
        /** Number of entries in relocs */
        /** Mapped address for the buffer, saved across map/unmap cycles */
        /** GTT virtual address for the buffer, saved across map/unmap cycles */
        drmMMListHead vma_list;

        /**
         * Boolean of whether this BO and its children have been included in
         * the current drm_intel_bufmgr_check_aperture_space() total.
         */
        bool included_in_check_aperture;

        /**
         * Boolean of whether this buffer has been used as a relocation
         * target and had its size accounted for, and thus can't have any
         * further relocations added to it.
         */
        bool used_as_reloc_target;

        /**
         * Boolean of whether we have encountered an error whilst building
         * the relocation tree.
         */

        /**
         * Boolean of whether this buffer can be re-used
         */

        /**
         * Size in bytes of this buffer and its relocation descendants.
         *
         * Used to avoid costly tree walking in
         * drm_intel_bufmgr_check_aperture in the common case.
         */

        /**
         * Number of potential fence registers required by this buffer and its
         * relocs.
         */
        int reloc_tree_fences;

        /** Set when we may need to do the SW_FINISH ioctl on unmap. */
        bool mapped_cpu_write;
};

static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count);

static unsigned int
drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count);

static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
                            uint32_t *swizzle_mode);

static int
drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
                                     uint32_t tiling_mode,
                                     uint32_t stride);

static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
                                                      time_t time);

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);

static void drm_intel_gem_bo_free(drm_intel_bo *bo);

static unsigned long
drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
                           uint32_t *tiling_mode)
{
        unsigned long min_size, max_size;
        unsigned long i;

        if (*tiling_mode == I915_TILING_NONE)
                return size;

        /* 965+ just need multiples of page size for tiling */
        if (bufmgr_gem->gen >= 4)
                return ROUND_UP_TO(size, 4096);

        /* Older chips need powers of two, of at least 512k or 1M */
        if (bufmgr_gem->gen == 3) {
                min_size = 1024*1024;
                max_size = 128*1024*1024;
        } else {
                min_size = 512*1024;
                max_size = 64*1024*1024;
        }

        if (size > max_size) {
                *tiling_mode = I915_TILING_NONE;
                return size;
        }

        /* Do we need to allocate every page for the fence? */
        if (bufmgr_gem->has_relaxed_fencing)
                return ROUND_UP_TO(size, 4096);

        for (i = min_size; i < size; i <<= 1)
                ;

        return i;
}

/*
 * Round a given pitch up to the minimum required for X tiling on a
 * given chip.  We use 512 as the minimum to allow for a later tiling
 * change.
 */
static unsigned long
drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
                            unsigned long pitch, uint32_t *tiling_mode)
{
        unsigned long tile_width;
        unsigned long i;

        /* If untiled, then just align it so that we can do rendering
         * to it with the 3D engine.
         */
        if (*tiling_mode == I915_TILING_NONE)
                return ALIGN(pitch, 64);

        if (*tiling_mode == I915_TILING_X
            || (IS_915(bufmgr_gem->pci_device)
                && *tiling_mode == I915_TILING_Y))
                tile_width = 512;
        else
                tile_width = 128;

        /* 965 is flexible */
        if (bufmgr_gem->gen >= 4)
                return ROUND_UP_TO(pitch, tile_width);

        /* The older hardware has a maximum pitch of 8192 with tiled
         * surfaces, so fallback to untiled if it's too large.
         */
        if (pitch > 8192) {
                *tiling_mode = I915_TILING_NONE;
                return ALIGN(pitch, 64);
        }

        /* Pre-965 needs power of two tile width */
        for (i = tile_width; i < pitch; i <<= 1)
                ;

        return i;
}

static struct drm_intel_gem_bo_bucket *
drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
                                 unsigned long size)
{
        int i;

        for (i = 0; i < bufmgr_gem->num_buckets; i++) {
                struct drm_intel_gem_bo_bucket *bucket =
                    &bufmgr_gem->cache_bucket[i];
                if (bucket->size >= size) {
                        return bucket;
                }
        }

        return NULL;
}

static void
drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
{
        int i, j;

        for (i = 0; i < bufmgr_gem->exec_count; i++) {
                drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
                drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

                if (bo_gem->relocs == NULL) {
                        DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
                            bo_gem->name);
                        continue;
                }

                for (j = 0; j < bo_gem->reloc_count; j++) {
                        drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
                        drm_intel_bo_gem *target_gem =
                            (drm_intel_bo_gem *) target_bo;

                        DBG("%2d: %d (%s)@0x%08llx -> "
                            "%d (%s)@0x%08lx + 0x%08x\n",
                            i,
                            bo_gem->gem_handle, bo_gem->name,
                            (unsigned long long)bo_gem->relocs[j].offset,
                            target_gem->gem_handle,
                            target_gem->name,
                            target_bo->offset,
                            bo_gem->relocs[j].delta);
                }
        }
}

static void
drm_intel_gem_bo_reference(drm_intel_bo *bo)
{
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

        atomic_inc(&bo_gem->refcount);
}

/**
 * Adds the given buffer to the list of buffers to be validated (moved into the
 * appropriate memory type) with the next batch submission.
 *
 * If a buffer is validated multiple times in a batch submission, it ends up
 * with the intersection of the memory type flags and the union of the
 * access flags.
 */
static void
drm_intel_add_validate_buffer(drm_intel_bo *bo)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        int index;

        if (bo_gem->validate_index != -1)
                return;

        /* Extend the array of validation entries as necessary. */
        if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
                int new_size = bufmgr_gem->exec_size * 2;

                if (new_size == 0)
                        new_size = 5;

                bufmgr_gem->exec_objects =
                    realloc(bufmgr_gem->exec_objects,
                            sizeof(*bufmgr_gem->exec_objects) * new_size);
                bufmgr_gem->exec_bos =
                    realloc(bufmgr_gem->exec_bos,
                            sizeof(*bufmgr_gem->exec_bos) * new_size);
                bufmgr_gem->exec_size = new_size;
        }

        index = bufmgr_gem->exec_count;
        bo_gem->validate_index = index;
        /* Fill in array entry */
        bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
        bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
        bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
        bufmgr_gem->exec_objects[index].alignment = 0;
        bufmgr_gem->exec_objects[index].offset = 0;
        bufmgr_gem->exec_bos[index] = bo;
        bufmgr_gem->exec_count++;
}

static void
drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        int index;

        if (bo_gem->validate_index != -1) {
                if (need_fence)
                        bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |=
                                EXEC_OBJECT_NEEDS_FENCE;
                return;
        }

        /* Extend the array of validation entries as necessary. */
        if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
                int new_size = bufmgr_gem->exec_size * 2;

                if (new_size == 0)
                        new_size = 5;

                bufmgr_gem->exec2_objects =
                        realloc(bufmgr_gem->exec2_objects,
                                sizeof(*bufmgr_gem->exec2_objects) * new_size);
                bufmgr_gem->exec_bos =
                        realloc(bufmgr_gem->exec_bos,
                                sizeof(*bufmgr_gem->exec_bos) * new_size);
                bufmgr_gem->exec_size = new_size;
        }

        index = bufmgr_gem->exec_count;
        bo_gem->validate_index = index;
        /* Fill in array entry */
        bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
        bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
        bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
        bufmgr_gem->exec2_objects[index].alignment = 0;
        bufmgr_gem->exec2_objects[index].offset = 0;
        bufmgr_gem->exec_bos[index] = bo;
        bufmgr_gem->exec2_objects[index].flags = 0;
        bufmgr_gem->exec2_objects[index].rsvd1 = 0;
        bufmgr_gem->exec2_objects[index].rsvd2 = 0;
        if (need_fence)
                bufmgr_gem->exec2_objects[index].flags |=
                        EXEC_OBJECT_NEEDS_FENCE;
        bufmgr_gem->exec_count++;
}

#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
        sizeof(uint32_t))

static void
drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
                                      drm_intel_bo_gem *bo_gem)
{
        unsigned int size;

        assert(!bo_gem->used_as_reloc_target);

        /* The older chipsets are far less flexible in terms of tiling,
         * and require tiled buffers to be size-aligned in the aperture.
         * This means that in the worst possible case we will need a hole
         * twice as large as the object in order for it to fit into the
         * aperture.  Optimal packing is for wimps.
         */
        size = bo_gem->bo.size;
        if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
                unsigned int min_size;

                if (bufmgr_gem->has_relaxed_fencing) {
                        if (bufmgr_gem->gen == 3)
                                min_size = 1024*1024;
                        else
                                min_size = 512*1024;

                        while (min_size < size)
                                min_size *= 2;
                } else
                        min_size = size;

                /* Account for worst-case alignment. */
                size = 2 * min_size;
        }

        bo_gem->reloc_tree_size = size;
}

static int
drm_intel_setup_reloc_list(drm_intel_bo *bo)
{
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        unsigned int max_relocs = bufmgr_gem->max_relocs;

        if (bo->size / 4 < max_relocs)
                max_relocs = bo->size / 4;

        bo_gem->relocs = malloc(max_relocs *
                                sizeof(struct drm_i915_gem_relocation_entry));
        bo_gem->reloc_target_info = malloc(max_relocs *
                                           sizeof(drm_intel_reloc_target));
        if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
                bo_gem->has_error = true;

                free (bo_gem->relocs);
                bo_gem->relocs = NULL;

                free (bo_gem->reloc_target_info);
                bo_gem->reloc_target_info = NULL;

                return 1;
        }

        return 0;
}

static int
drm_intel_gem_bo_busy(drm_intel_bo *bo)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        struct drm_i915_gem_busy busy;
        int ret;

        VG_CLEAR(busy);
        busy.handle = bo_gem->gem_handle;

        ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);

        return (ret == 0 && busy.busy);
}

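/* Usage sketch (illustrative, not part of the original file): polling a BO
 * with the public drm_intel_bo_busy() entry point, which the callback above
 * backs via the bufmgr vtable, before deciding whether a map would stall.
 */
#if 0
if (!drm_intel_bo_busy(bo)) {
        /* The GPU is done with the buffer; mapping it won't stall. */
        drm_intel_bo_map(bo, 1);
        /* ...fill in new data... */
        drm_intel_bo_unmap(bo);
}
#endif
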
static int
drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
                                  drm_intel_bo_gem *bo_gem, int state)
{
        struct drm_i915_gem_madvise madv;

        VG_CLEAR(madv);
        madv.handle = bo_gem->gem_handle;
        madv.madv = state;
        madv.retained = 1;
        drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

        return madv.retained;
}

static int
drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
{
        return drm_intel_gem_bo_madvise_internal
                ((drm_intel_bufmgr_gem *) bo->bufmgr,
                 (drm_intel_bo_gem *) bo,
                 madv);
}

/* drop the oldest entries that have been purged by the kernel */
static void
drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
                                    struct drm_intel_gem_bo_bucket *bucket)
{
        while (!DRMLISTEMPTY(&bucket->head)) {
                drm_intel_bo_gem *bo_gem;

                bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
                                      bucket->head.next, head);
                if (drm_intel_gem_bo_madvise_internal
                    (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
                        break;

                DRMLISTDEL(&bo_gem->head);
                drm_intel_gem_bo_free(&bo_gem->bo);
        }
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
                                const char *name,
                                unsigned long size,
                                unsigned long flags,
                                uint32_t tiling_mode,
                                unsigned long stride)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
        drm_intel_bo_gem *bo_gem;
        unsigned int page_size = getpagesize();
        int ret;
        struct drm_intel_gem_bo_bucket *bucket;
        bool alloc_from_cache;
        unsigned long bo_size;
        bool for_render = false;

        if (flags & BO_ALLOC_FOR_RENDER)
                for_render = true;

        /* Round the allocated size up to a power of two number of pages. */
        bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);

        /* If we don't have caching at this size, don't actually round the
         * allocation up.
         */
        if (bucket == NULL) {
                bo_size = size;
                if (bo_size < page_size)
                        bo_size = page_size;
        } else {
                bo_size = bucket->size;
        }

        pthread_mutex_lock(&bufmgr_gem->lock);
        /* Get a buffer out of the cache if available */
retry:
        alloc_from_cache = false;
        if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
                if (for_render) {
                        /* Allocate new render-target BOs from the tail (MRU)
                         * of the list, as it will likely be hot in the GPU
                         * cache and in the aperture for us.
                         */
                        bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
                                              bucket->head.prev, head);
                        DRMLISTDEL(&bo_gem->head);
                        alloc_from_cache = true;
                } else {
                        /* For non-render-target BOs (where we're probably
                         * going to map it first thing in order to fill it
                         * with data), check if the last BO in the cache is
                         * unbusy, and only reuse in that case.  Otherwise,
                         * allocating a new buffer is probably faster than
                         * waiting for the GPU to finish.
                         */
                        bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
                                              bucket->head.next, head);
                        if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
                                alloc_from_cache = true;
                                DRMLISTDEL(&bo_gem->head);
                        }
                }

                if (alloc_from_cache) {
                        if (!drm_intel_gem_bo_madvise_internal
                            (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
                                drm_intel_gem_bo_free(&bo_gem->bo);
                                drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
                                                                    bucket);
                                goto retry;
                        }

                        if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
                                                                 tiling_mode,
                                                                 stride)) {
                                drm_intel_gem_bo_free(&bo_gem->bo);
                                goto retry;
                        }
                }
        }
        pthread_mutex_unlock(&bufmgr_gem->lock);

        if (!alloc_from_cache) {
                struct drm_i915_gem_create create;

                bo_gem = calloc(1, sizeof(*bo_gem));
                if (!bo_gem)
                        return NULL;

                bo_gem->bo.size = bo_size;

                VG_CLEAR(create);
                create.size = bo_size;

                ret = drmIoctl(bufmgr_gem->fd,
                               DRM_IOCTL_I915_GEM_CREATE,
                               &create);
                bo_gem->gem_handle = create.handle;
                bo_gem->bo.handle = bo_gem->gem_handle;
                if (ret != 0) {
                        free(bo_gem);
                        return NULL;
                }
                bo_gem->bo.bufmgr = bufmgr;

                bo_gem->tiling_mode = I915_TILING_NONE;
                bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
                bo_gem->stride = 0;

                if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
                                                         tiling_mode,
                                                         stride)) {
                        drm_intel_gem_bo_free(&bo_gem->bo);
                        return NULL;
                }

                DRMINITLISTHEAD(&bo_gem->name_list);
                DRMINITLISTHEAD(&bo_gem->vma_list);
        }

        bo_gem->name = name;
        atomic_set(&bo_gem->refcount, 1);
        bo_gem->validate_index = -1;
        bo_gem->reloc_tree_fences = 0;
        bo_gem->used_as_reloc_target = false;
        bo_gem->has_error = false;
        bo_gem->reusable = true;

        drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

        DBG("bo_create: buf %d (%s) %ldb\n",
            bo_gem->gem_handle, bo_gem->name, size);

        return &bo_gem->bo;
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
                                  const char *name,
                                  unsigned long size,
                                  unsigned int alignment)
{
        return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
                                               BO_ALLOC_FOR_RENDER,
                                               I915_TILING_NONE, 0);
}

static drm_intel_bo *
drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
                       const char *name,
                       unsigned long size,
                       unsigned int alignment)
{
        return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
                                               I915_TILING_NONE, 0);
}

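/* Usage sketch (illustrative, not part of the original file): typical
 * lifetime of a linear BO through the public API that these vtable entries
 * implement.  "fd" is assumed to be an open DRM device node.
 */
#if 0
drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
drm_intel_bo *bo;

drm_intel_bufmgr_gem_enable_reuse(bufmgr);
bo = drm_intel_bo_alloc(bufmgr, "scratch", 64 * 1024, 4096);
/* ...use the buffer... */
drm_intel_bo_unreference(bo);
drm_intel_bufmgr_destroy(bufmgr);
#endif
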
static drm_intel_bo *
drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
                             int x, int y, int cpp, uint32_t *tiling_mode,
                             unsigned long *pitch, unsigned long flags)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
        unsigned long size, stride;
        uint32_t tiling;
        unsigned long aligned_y, height_alignment;

        tiling = *tiling_mode;

        /* If we're tiled, our allocations are in 8 or 32-row blocks,
         * so failure to align our height means that we won't allocate
         * enough pages.
         *
         * If we're untiled, we still have to align to 2 rows high
         * because the data port accesses 2x2 blocks even if the
         * bottom row isn't to be rendered, so failure to align means
         * we could walk off the end of the GTT and fault.  This is
         * documented on 965, and may be the case on older chipsets
         * too so we try to be careful.
         */
        aligned_y = y;
        height_alignment = 2;

        if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
                height_alignment = 16;
        else if (tiling == I915_TILING_X
                 || (IS_915(bufmgr_gem->pci_device)
                     && tiling == I915_TILING_Y))
                height_alignment = 8;
        else if (tiling == I915_TILING_Y)
                height_alignment = 32;
        aligned_y = ALIGN(y, height_alignment);

        do {
                stride = x * cpp;
                stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
                size = stride * aligned_y;
                size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
        } while (*tiling_mode != tiling);
        tiling = *tiling_mode;

        if (tiling == I915_TILING_NONE)
                stride = 0;

        *pitch = stride;

        return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
                                               tiling, stride);
}

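/* Usage sketch (illustrative, not part of the original file): allocating an
 * X-tiled surface through the public drm_intel_bo_alloc_tiled() entry point.
 * The requested tiling mode may be downgraded by the logic above, so the
 * caller must read back *tiling_mode and *pitch rather than assume them.
 */
#if 0
uint32_t tiling = I915_TILING_X;
unsigned long pitch;
drm_intel_bo *bo;

bo = drm_intel_bo_alloc_tiled(bufmgr, "miptree", 1024, 768, 4,
                              &tiling, &pitch, 0);
if (bo != NULL && tiling == I915_TILING_NONE)
        ; /* hardware/pitch limits forced a linear fallback */
#endif
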
/**
 * Returns a drm_intel_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
drm_intel_bo *
drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
                                  const char *name,
                                  unsigned int handle)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
        drm_intel_bo_gem *bo_gem;
        int ret;
        struct drm_gem_open open_arg;
        struct drm_i915_gem_get_tiling get_tiling;
        drmMMListHead *list;

        /* At the moment most applications only have a few named bos.
         * For instance, in a DRI client only the render buffers passed
         * between X and the client are named.  And since X returns the
         * alternating names for the front/back buffer a linear search
         * provides a sufficiently fast match.
         */
        for (list = bufmgr_gem->named.next;
             list != &bufmgr_gem->named;
             list = list->next) {
                bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
                if (bo_gem->global_name == handle) {
                        drm_intel_gem_bo_reference(&bo_gem->bo);
                        return &bo_gem->bo;
                }
        }

        bo_gem = calloc(1, sizeof(*bo_gem));
        if (!bo_gem)
                return NULL;

        VG_CLEAR(open_arg);
        open_arg.name = handle;
        ret = drmIoctl(bufmgr_gem->fd,
                       DRM_IOCTL_GEM_OPEN,
                       &open_arg);
        if (ret != 0) {
                DBG("Couldn't reference %s handle 0x%08x: %s\n",
                    name, handle, strerror(errno));
                free(bo_gem);
                return NULL;
        }
        bo_gem->bo.size = open_arg.size;
        bo_gem->bo.offset = 0;
        bo_gem->bo.virtual = NULL;
        bo_gem->bo.bufmgr = bufmgr;
        bo_gem->name = name;
        atomic_set(&bo_gem->refcount, 1);
        bo_gem->validate_index = -1;
        bo_gem->gem_handle = open_arg.handle;
        bo_gem->bo.handle = open_arg.handle;
        bo_gem->global_name = handle;
        bo_gem->reusable = false;

        VG_CLEAR(get_tiling);
        get_tiling.handle = bo_gem->gem_handle;
        ret = drmIoctl(bufmgr_gem->fd,
                       DRM_IOCTL_I915_GEM_GET_TILING,
                       &get_tiling);
        if (ret != 0) {
                drm_intel_gem_bo_unreference(&bo_gem->bo);
                return NULL;
        }
        bo_gem->tiling_mode = get_tiling.tiling_mode;
        bo_gem->swizzle_mode = get_tiling.swizzle_mode;
        /* XXX stride is unknown */
        drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

        DRMINITLISTHEAD(&bo_gem->vma_list);
        DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
        DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);

        return &bo_gem->bo;
}

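/* Usage sketch (illustrative, not part of the original file): sharing a BO
 * between two processes via a flink name.  Process A exports with
 * drm_intel_bo_flink(); process B imports through the function above.  How
 * the name travels between the processes (X protocol, a pipe, etc.) is up to
 * the application.
 */
#if 0
/* Process A */
uint32_t flink_name;
drm_intel_bo_flink(bo, &flink_name);
/* ...hand flink_name to process B... */

/* Process B */
drm_intel_bo *shared =
        drm_intel_bo_gem_create_from_name(bufmgr, "shared", flink_name);
#endif
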
static void
drm_intel_gem_bo_free(drm_intel_bo *bo)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        struct drm_gem_close close;
        int ret;

        DRMLISTDEL(&bo_gem->vma_list);
        if (bo_gem->mem_virtual) {
                VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
                munmap(bo_gem->mem_virtual, bo_gem->bo.size);
                bufmgr_gem->vma_count--;
        }
        if (bo_gem->gtt_virtual) {
                munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
                bufmgr_gem->vma_count--;
        }

        /* Close this object */
        VG_CLEAR(close);
        close.handle = bo_gem->gem_handle;
        ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
        if (ret != 0) {
                DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
                    bo_gem->gem_handle, bo_gem->name, strerror(errno));
        }
        free(bo);
}

static void
drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
{
#if HAVE_VALGRIND
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

        if (bo_gem->mem_virtual)
                VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);

        if (bo_gem->gtt_virtual)
                VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
#endif
}

/** Frees all cached buffers significantly older than @time. */
static void
drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
{
        int i;

        if (bufmgr_gem->time == time)
                return;

        for (i = 0; i < bufmgr_gem->num_buckets; i++) {
                struct drm_intel_gem_bo_bucket *bucket =
                    &bufmgr_gem->cache_bucket[i];

                while (!DRMLISTEMPTY(&bucket->head)) {
                        drm_intel_bo_gem *bo_gem;

                        bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
                                              bucket->head.next, head);
                        if (time - bo_gem->free_time <= 1)
                                break;

                        DRMLISTDEL(&bo_gem->head);

                        drm_intel_gem_bo_free(&bo_gem->bo);
                }
        }

        bufmgr_gem->time = time;
}

static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
{
        int limit;

        DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
            bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);

        if (bufmgr_gem->vma_max < 0)
                return;

        /* We may need to evict a few entries in order to create new mmaps */
        limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
        if (limit < 0)
                limit = 0;

        while (bufmgr_gem->vma_count > limit) {
                drm_intel_bo_gem *bo_gem;

                bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
                                      bufmgr_gem->vma_cache.next,
                                      vma_list);
                assert(bo_gem->map_count == 0);
                DRMLISTDELINIT(&bo_gem->vma_list);

                if (bo_gem->mem_virtual) {
                        munmap(bo_gem->mem_virtual, bo_gem->bo.size);
                        bo_gem->mem_virtual = NULL;
                        bufmgr_gem->vma_count--;
                }
                if (bo_gem->gtt_virtual) {
                        munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
                        bo_gem->gtt_virtual = NULL;
                        bufmgr_gem->vma_count--;
                }
        }
}

static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
                                       drm_intel_bo_gem *bo_gem)
{
        bufmgr_gem->vma_open--;
        DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
        if (bo_gem->mem_virtual)
                bufmgr_gem->vma_count++;
        if (bo_gem->gtt_virtual)
                bufmgr_gem->vma_count++;
        drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}

static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
                                      drm_intel_bo_gem *bo_gem)
{
        bufmgr_gem->vma_open++;
        DRMLISTDEL(&bo_gem->vma_list);
        if (bo_gem->mem_virtual)
                bufmgr_gem->vma_count--;
        if (bo_gem->gtt_virtual)
                bufmgr_gem->vma_count--;
        drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}

static void
drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        struct drm_intel_gem_bo_bucket *bucket;
        int i;

        /* Unreference all the target buffers */
        for (i = 0; i < bo_gem->reloc_count; i++) {
                if (bo_gem->reloc_target_info[i].bo != bo) {
                        drm_intel_gem_bo_unreference_locked_timed(bo_gem->
                                                                  reloc_target_info[i].bo,
                                                                  time);
                }
        }
        bo_gem->reloc_count = 0;
        bo_gem->used_as_reloc_target = false;

        DBG("bo_unreference final: %d (%s)\n",
            bo_gem->gem_handle, bo_gem->name);

        /* release memory associated with this object */
        if (bo_gem->reloc_target_info) {
                free(bo_gem->reloc_target_info);
                bo_gem->reloc_target_info = NULL;
        }
        if (bo_gem->relocs) {
                free(bo_gem->relocs);
                bo_gem->relocs = NULL;
        }

        /* Clear any left-over mappings */
        if (bo_gem->map_count) {
                DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
                bo_gem->map_count = 0;
                drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
                drm_intel_gem_bo_mark_mmaps_incoherent(bo);
        }

        DRMLISTDEL(&bo_gem->name_list);

        bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
        /* Put the buffer into our internal cache for reuse if we can. */
        if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
            drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
                                              I915_MADV_DONTNEED)) {
                bo_gem->free_time = time;

                bo_gem->name = NULL;
                bo_gem->validate_index = -1;

                DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
        } else {
                drm_intel_gem_bo_free(bo);
        }
}

static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
                                                      time_t time)
{
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

        assert(atomic_read(&bo_gem->refcount) > 0);
        if (atomic_dec_and_test(&bo_gem->refcount))
                drm_intel_gem_bo_unreference_final(bo, time);
}

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
{
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

        assert(atomic_read(&bo_gem->refcount) > 0);
        if (atomic_dec_and_test(&bo_gem->refcount)) {
                drm_intel_bufmgr_gem *bufmgr_gem =
                    (drm_intel_bufmgr_gem *) bo->bufmgr;
                struct timespec time;

                clock_gettime(CLOCK_MONOTONIC, &time);

                pthread_mutex_lock(&bufmgr_gem->lock);
                drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
                drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
                pthread_mutex_unlock(&bufmgr_gem->lock);
        }
}

static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        struct drm_i915_gem_set_domain set_domain;
        int ret;

        pthread_mutex_lock(&bufmgr_gem->lock);

        if (bo_gem->map_count++ == 0)
                drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);

        if (!bo_gem->mem_virtual) {
                struct drm_i915_gem_mmap mmap_arg;

                DBG("bo_map: %d (%s), map_count=%d\n",
                    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

                VG_CLEAR(mmap_arg);
                mmap_arg.handle = bo_gem->gem_handle;
                mmap_arg.offset = 0;
                mmap_arg.size = bo->size;
                ret = drmIoctl(bufmgr_gem->fd,
                               DRM_IOCTL_I915_GEM_MMAP,
                               &mmap_arg);
                if (ret != 0) {
                        ret = -errno;
                        DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
                            __FILE__, __LINE__, bo_gem->gem_handle,
                            bo_gem->name, strerror(errno));
                        if (--bo_gem->map_count == 0)
                                drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
                        pthread_mutex_unlock(&bufmgr_gem->lock);
                        return ret;
                }
                VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
                bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
        }
        DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
            bo_gem->mem_virtual);
        bo->virtual = bo_gem->mem_virtual;

        VG_CLEAR(set_domain);
        set_domain.handle = bo_gem->gem_handle;
        set_domain.read_domains = I915_GEM_DOMAIN_CPU;
        if (write_enable)
                set_domain.write_domain = I915_GEM_DOMAIN_CPU;
        else
                set_domain.write_domain = 0;
        ret = drmIoctl(bufmgr_gem->fd,
                       DRM_IOCTL_I915_GEM_SET_DOMAIN,
                       &set_domain);
        if (ret != 0) {
                DBG("%s:%d: Error setting to CPU domain %d: %s\n",
                    __FILE__, __LINE__, bo_gem->gem_handle,
                    strerror(errno));
        }

        if (write_enable)
                bo_gem->mapped_cpu_write = true;

        drm_intel_gem_bo_mark_mmaps_incoherent(bo);
        VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
        pthread_mutex_unlock(&bufmgr_gem->lock);

        return 0;
}

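/* Usage sketch (illustrative, not part of the original file): CPU-mapping a
 * BO for a write through the public drm_intel_bo_map() entry point, which
 * lands in the function above via the bufmgr vtable.
 */
#if 0
if (drm_intel_bo_map(bo, 1) == 0) {
        memset(bo->virtual, 0, bo->size);  /* CPU-domain write */
        drm_intel_bo_unmap(bo);            /* triggers SW_FINISH if scanout */
}
#endif
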
int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        struct drm_i915_gem_set_domain set_domain;
        int ret;

        pthread_mutex_lock(&bufmgr_gem->lock);

        if (bo_gem->map_count++ == 0)
                drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);

        /* Get a mapping of the buffer if we haven't before. */
        if (bo_gem->gtt_virtual == NULL) {
                struct drm_i915_gem_mmap_gtt mmap_arg;

                DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
                    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

                VG_CLEAR(mmap_arg);
                mmap_arg.handle = bo_gem->gem_handle;

                /* Get the fake offset back... */
                ret = drmIoctl(bufmgr_gem->fd,
                               DRM_IOCTL_I915_GEM_MMAP_GTT,
                               &mmap_arg);
                if (ret != 0) {
                        ret = -errno;
                        DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
                            __FILE__, __LINE__,
                            bo_gem->gem_handle, bo_gem->name,
                            strerror(errno));
                        if (--bo_gem->map_count == 0)
                                drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
                        pthread_mutex_unlock(&bufmgr_gem->lock);
                        return ret;
                }

                /* and mmap it */
                bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
                                           MAP_SHARED, bufmgr_gem->fd,
                                           mmap_arg.offset);
                if (bo_gem->gtt_virtual == MAP_FAILED) {
                        bo_gem->gtt_virtual = NULL;
                        ret = -errno;
                        DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
                            __FILE__, __LINE__,
                            bo_gem->gem_handle, bo_gem->name,
                            strerror(errno));
                        if (--bo_gem->map_count == 0)
                                drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
                        pthread_mutex_unlock(&bufmgr_gem->lock);
                        return ret;
                }
        }

        bo->virtual = bo_gem->gtt_virtual;

        DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
            bo_gem->gtt_virtual);

        /* Now move it to the GTT domain so that the CPU caches are flushed */
        VG_CLEAR(set_domain);
        set_domain.handle = bo_gem->gem_handle;
        set_domain.read_domains = I915_GEM_DOMAIN_GTT;
        set_domain.write_domain = I915_GEM_DOMAIN_GTT;
        ret = drmIoctl(bufmgr_gem->fd,
                       DRM_IOCTL_I915_GEM_SET_DOMAIN,
                       &set_domain);
        if (ret != 0) {
                DBG("%s:%d: Error setting domain %d: %s\n",
                    __FILE__, __LINE__, bo_gem->gem_handle,
                    strerror(errno));
        }

        drm_intel_gem_bo_mark_mmaps_incoherent(bo);
        VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
        pthread_mutex_unlock(&bufmgr_gem->lock);

        return 0;
}

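/* Usage sketch (illustrative, not part of the original file): writing to a
 * tiled BO through the GTT so the hardware performs the (de)tiling.  GTT
 * mappings are typically write-combined, so prefer writes and avoid reading
 * back through them.  "pixels" and "row_bytes" are hypothetical.
 */
#if 0
if (drm_intel_gem_bo_map_gtt(bo) == 0) {
        memcpy(bo->virtual, pixels, row_bytes);  /* GTT-domain write */
        drm_intel_gem_bo_unmap_gtt(bo);
}
#endif
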
static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        int ret = 0;

        if (bo == NULL)
                return 0;

        pthread_mutex_lock(&bufmgr_gem->lock);

        if (bo_gem->map_count <= 0) {
                DBG("attempted to unmap an unmapped bo\n");
                pthread_mutex_unlock(&bufmgr_gem->lock);
                /* Preserve the old behaviour of just treating this as a
                 * no-op rather than reporting the error.
                 */
                return 0;
        }

        if (bo_gem->mapped_cpu_write) {
                struct drm_i915_gem_sw_finish sw_finish;

                /* Cause a flush to happen if the buffer's pinned for
                 * scanout, so the results show up in a timely manner.
                 * Unlike GTT set domains, this only does work if the
                 * buffer should be scanout-related.
                 */
                VG_CLEAR(sw_finish);
                sw_finish.handle = bo_gem->gem_handle;
                ret = drmIoctl(bufmgr_gem->fd,
                               DRM_IOCTL_I915_GEM_SW_FINISH,
                               &sw_finish);
                ret = ret == -1 ? -errno : 0;

                bo_gem->mapped_cpu_write = false;
        }

        /* We need to unmap after every invocation as we cannot track
         * an open vma for every bo as that will exhaust the system
         * limits and cause later failures.
         */
        if (--bo_gem->map_count == 0) {
                drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
                drm_intel_gem_bo_mark_mmaps_incoherent(bo);
                bo->virtual = NULL;
        }
        pthread_mutex_unlock(&bufmgr_gem->lock);

        return ret;
}

int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
{
        return drm_intel_gem_bo_unmap(bo);
}

static int
drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
                         unsigned long size, const void *data)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        struct drm_i915_gem_pwrite pwrite;
        int ret;

        VG_CLEAR(pwrite);
        pwrite.handle = bo_gem->gem_handle;
        pwrite.offset = offset;
        pwrite.size = size;
        pwrite.data_ptr = (uint64_t) (uintptr_t) data;
        ret = drmIoctl(bufmgr_gem->fd,
                       DRM_IOCTL_I915_GEM_PWRITE,
                       &pwrite);
        if (ret != 0) {
                ret = -errno;
                DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
                    __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
                    (int)size, strerror(errno));
        }

        return ret;
}

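/* Usage sketch (illustrative, not part of the original file): uploading data
 * with pwrite instead of mapping.  For small, one-shot uploads this avoids
 * creating (and later evicting) a CPU mapping of the BO.  The batch contents
 * here are just an example ending in MI_BATCH_BUFFER_END.
 */
#if 0
static const uint32_t batch[] = { 0x05000000 /* MI_BATCH_BUFFER_END */, 0 };

drm_intel_bo_subdata(bo, 0, sizeof(batch), batch);
#endif
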
static int
drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
        struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
        int ret;

        VG_CLEAR(get_pipe_from_crtc_id);
        get_pipe_from_crtc_id.crtc_id = crtc_id;
        ret = drmIoctl(bufmgr_gem->fd,
                       DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
                       &get_pipe_from_crtc_id);
        if (ret != 0) {
                /* We return -1 here to signal that we don't
                 * know which pipe is associated with this crtc.
                 * This lets the caller know that this information
                 * isn't available; using the wrong pipe for
                 * vblank waiting can cause the chipset to lock up.
                 */
                return -1;
        }

        return get_pipe_from_crtc_id.pipe;
}

static int
drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
                             unsigned long size, void *data)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        struct drm_i915_gem_pread pread;
        int ret;

        VG_CLEAR(pread);
        pread.handle = bo_gem->gem_handle;
        pread.offset = offset;
        pread.size = size;
        pread.data_ptr = (uint64_t) (uintptr_t) data;
        ret = drmIoctl(bufmgr_gem->fd,
                       DRM_IOCTL_I915_GEM_PREAD,
                       &pread);
        if (ret != 0) {
                ret = -errno;
                DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
                    __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
                    (int)size, strerror(errno));
        }

        return ret;
}

/** Waits for all GPU rendering with the object to have completed. */
static void
drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
{
        drm_intel_gem_bo_start_gtt_access(bo, 1);
}

/**
 * Sets the object to the GTT read and possibly write domain, used by the X
 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
 *
 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
 * can do tiled pixmaps this way.
 */
void
drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        struct drm_i915_gem_set_domain set_domain;
        int ret;

        VG_CLEAR(set_domain);
        set_domain.handle = bo_gem->gem_handle;
        set_domain.read_domains = I915_GEM_DOMAIN_GTT;
        set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
        ret = drmIoctl(bufmgr_gem->fd,
                       DRM_IOCTL_I915_GEM_SET_DOMAIN,
                       &set_domain);
        if (ret != 0) {
                DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
                    __FILE__, __LINE__, bo_gem->gem_handle,
                    set_domain.read_domains, set_domain.write_domain,
                    strerror(errno));
        }
}

static void
drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
        int i;

        free(bufmgr_gem->exec2_objects);
        free(bufmgr_gem->exec_objects);
        free(bufmgr_gem->exec_bos);

        pthread_mutex_destroy(&bufmgr_gem->lock);

        /* Free any cached buffer objects we were going to reuse */
        for (i = 0; i < bufmgr_gem->num_buckets; i++) {
                struct drm_intel_gem_bo_bucket *bucket =
                    &bufmgr_gem->cache_bucket[i];
                drm_intel_bo_gem *bo_gem;

                while (!DRMLISTEMPTY(&bucket->head)) {
                        bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
                                              bucket->head.next, head);
                        DRMLISTDEL(&bo_gem->head);

                        drm_intel_gem_bo_free(&bo_gem->bo);
                }
        }

        free(bufmgr);
}

/**
 * Adds the target buffer to the validation list and adds the relocation
 * to the reloc_buffer's relocation list.
 *
 * The relocation entry at the given offset must already contain the
 * precomputed relocation value, because the kernel will optimize out
 * the relocation entry write when the buffer hasn't moved from the
 * last known offset in target_bo.
 */
static int
do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
                 drm_intel_bo *target_bo, uint32_t target_offset,
                 uint32_t read_domains, uint32_t write_domain,
                 bool need_fence)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
        bool fenced_command;

        if (bo_gem->has_error)
                return -ENOMEM;

        if (target_bo_gem->has_error) {
                bo_gem->has_error = true;
                return -ENOMEM;
        }

        /* We never use HW fences for rendering on 965+ */
        if (bufmgr_gem->gen >= 4)
                need_fence = false;

        fenced_command = need_fence;
        if (target_bo_gem->tiling_mode == I915_TILING_NONE)
                need_fence = false;

        /* Create a new relocation list if needed */
        if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
                return -ENOMEM;

        /* Check overflow */
        assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);

        assert(offset <= bo->size - 4);
        assert((write_domain & (write_domain - 1)) == 0);

        /* Make sure that we're not adding a reloc to something whose size has
         * already been accounted for.
         */
        assert(!bo_gem->used_as_reloc_target);
        if (target_bo_gem != bo_gem) {
                target_bo_gem->used_as_reloc_target = true;
                bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
        }
        /* An object needing a fence is a tiled buffer, so it won't have
         * relocs to other buffers.
         */
        if (need_fence)
                target_bo_gem->reloc_tree_fences = 1;
        bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;

        bo_gem->relocs[bo_gem->reloc_count].offset = offset;
        bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
        bo_gem->relocs[bo_gem->reloc_count].target_handle =
            target_bo_gem->gem_handle;
        bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
        bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
        bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;

        bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
        if (target_bo != bo)
                drm_intel_gem_bo_reference(target_bo);
        if (fenced_command)
                bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
                        DRM_INTEL_RELOC_FENCE;
        else
                bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;

        bo_gem->reloc_count++;

        return 0;
}

static int
drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
                            drm_intel_bo *target_bo, uint32_t target_offset,
                            uint32_t read_domains, uint32_t write_domain)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;

        return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
                                read_domains, write_domain,
                                !bufmgr_gem->fenced_relocs);
}

static int
drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
                                  drm_intel_bo *target_bo,
                                  uint32_t target_offset,
                                  uint32_t read_domains, uint32_t write_domain)
{
        return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
                                read_domains, write_domain, true);
}

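/* Usage sketch (illustrative, not part of the original file): emitting a
 * relocation from a batchbuffer to a render target through the public
 * drm_intel_bo_emit_reloc() entry point.  The presumed value is written into
 * the batch first, as the comment on do_bo_emit_reloc() requires.  "batch",
 * "n", "batch_bo", "target" and "target_offset" are hypothetical.
 */
#if 0
batch[n] = target->offset + target_offset;       /* precomputed value */
drm_intel_bo_subdata(batch_bo, n * 4, 4, &batch[n]);
drm_intel_bo_emit_reloc(batch_bo, n * 4,          /* reloc location   */
                        target, target_offset,
                        I915_GEM_DOMAIN_RENDER,   /* read domains     */
                        I915_GEM_DOMAIN_RENDER);  /* write domain     */
#endif
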
static int
drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
{
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

        return bo_gem->reloc_count;
}

/**
 * Removes existing relocation entries in the BO after "start".
 *
 * This allows a user to avoid a two-step process for state setup with
 * counting up all the buffer objects and doing a
 * drm_intel_bufmgr_check_aperture_space() before emitting any of the
 * relocations for the state setup.  Instead, save the state of the
 * batchbuffer including drm_intel_gem_get_reloc_count(), emit all the
 * state, and then check if it still fits in the aperture.
 *
 * Any further drm_intel_bufmgr_check_aperture_space() queries
 * involving this buffer in the tree are undefined after this call.
 */
static void
drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
{
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        int i;
        struct timespec time;

        clock_gettime(CLOCK_MONOTONIC, &time);

        assert(bo_gem->reloc_count >= start);
        /* Unreference the cleared target buffers */
        for (i = start; i < bo_gem->reloc_count; i++) {
                if (bo_gem->reloc_target_info[i].bo != bo) {
                        drm_intel_gem_bo_unreference_locked_timed(bo_gem->
                                                                  reloc_target_info[i].bo,
                                                                  time.tv_sec);
                }
        }
        bo_gem->reloc_count = start;
}

/**
 * Walk the tree of relocations rooted at BO and accumulate the list of
 * validations to be performed and update the relocation buffers with
 * index values into the validation list.
 */
static void
drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
{
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        int i;

        if (bo_gem->relocs == NULL)
                return;

        for (i = 0; i < bo_gem->reloc_count; i++) {
                drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;

                if (target_bo == bo)
                        continue;

                drm_intel_gem_bo_mark_mmaps_incoherent(bo);

                /* Continue walking the tree depth-first. */
                drm_intel_gem_bo_process_reloc(target_bo);

                /* Add the target to the validate list */
                drm_intel_add_validate_buffer(target_bo);
        }
}

static void
drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
{
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        int i;

        if (bo_gem->relocs == NULL)
                return;

        for (i = 0; i < bo_gem->reloc_count; i++) {
                drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
                int need_fence;

                if (target_bo == bo)
                        continue;

                drm_intel_gem_bo_mark_mmaps_incoherent(bo);

                /* Continue walking the tree depth-first. */
                drm_intel_gem_bo_process_reloc2(target_bo);

                need_fence = (bo_gem->reloc_target_info[i].flags &
                              DRM_INTEL_RELOC_FENCE);

                /* Add the target to the validate list */
                drm_intel_add_validate_buffer2(target_bo, need_fence);
        }
}

static void
drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
{
        int i;

        for (i = 0; i < bufmgr_gem->exec_count; i++) {
                drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
                drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

                /* Update the buffer offset */
                if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
                        DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
                            bo_gem->gem_handle, bo_gem->name, bo->offset,
                            (unsigned long long)bufmgr_gem->exec_objects[i].
                            offset);
                        bo->offset = bufmgr_gem->exec_objects[i].offset;
                }
        }
}

static void
drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
{
        int i;

        for (i = 0; i < bufmgr_gem->exec_count; i++) {
                drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
                drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

                /* Update the buffer offset */
                if (bufmgr_gem->exec2_objects[i].offset != bo->offset) {
                        DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
                            bo_gem->gem_handle, bo_gem->name, bo->offset,
                            (unsigned long long)bufmgr_gem->exec2_objects[i].offset);
                        bo->offset = bufmgr_gem->exec2_objects[i].offset;
                }
        }
}

static int
drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
                      drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        struct drm_i915_gem_execbuffer execbuf;
        int ret, i;

        if (bo_gem->has_error)
                return -ENOMEM;

        pthread_mutex_lock(&bufmgr_gem->lock);
        /* Update indices and set up the validate list. */
        drm_intel_gem_bo_process_reloc(bo);

        /* Add the batch buffer to the validation list.  There are no
         * relocations pointing to it.
         */
        drm_intel_add_validate_buffer(bo);

        VG_CLEAR(execbuf);
        execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
        execbuf.buffer_count = bufmgr_gem->exec_count;
        execbuf.batch_start_offset = 0;
        execbuf.batch_len = used;
        execbuf.cliprects_ptr = (uintptr_t) cliprects;
        execbuf.num_cliprects = num_cliprects;
        execbuf.DR1 = 0;
        execbuf.DR4 = DR4;

        ret = drmIoctl(bufmgr_gem->fd,
                       DRM_IOCTL_I915_GEM_EXECBUFFER,
                       &execbuf);
        if (ret != 0) {
                ret = -errno;
                if (errno == ENOSPC) {
                        DBG("Execbuffer fails to pin. "
                            "Estimate: %u. Actual: %u. Available: %u\n",
                            drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
                                                               bufmgr_gem->exec_count),
                            drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
                                                              bufmgr_gem->exec_count),
                            (unsigned int)bufmgr_gem->gtt_size);
                }
        }
        drm_intel_update_buffer_offsets(bufmgr_gem);

        if (bufmgr_gem->bufmgr.debug)
                drm_intel_gem_dump_validation_list(bufmgr_gem);

        for (i = 0; i < bufmgr_gem->exec_count; i++) {
                drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
                drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

                /* Disconnect the buffer from the validate list */
                bo_gem->validate_index = -1;
                bufmgr_gem->exec_bos[i] = NULL;
        }
        bufmgr_gem->exec_count = 0;
        pthread_mutex_unlock(&bufmgr_gem->lock);

        return ret;
}

static int
drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
                           drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
                           unsigned int flags)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        struct drm_i915_gem_execbuffer2 execbuf;
        int ret = 0;
        int i;

        switch (flags & 0x7) {
        default:
                return -EINVAL;
        case I915_EXEC_BLT:
                if (!bufmgr_gem->has_blt)
                        return -EINVAL;
                break;
        case I915_EXEC_BSD:
                if (!bufmgr_gem->has_bsd)
                        return -EINVAL;
                break;
        case I915_EXEC_RENDER:
        case I915_EXEC_DEFAULT:
                break;
        }

        pthread_mutex_lock(&bufmgr_gem->lock);
        /* Update indices and set up the validate list. */
        drm_intel_gem_bo_process_reloc2(bo);

        /* Add the batch buffer to the validation list.  There are no relocations
         * pointing to it.
         */
        drm_intel_add_validate_buffer2(bo, 0);

        VG_CLEAR(execbuf);
        execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
        execbuf.buffer_count = bufmgr_gem->exec_count;
        execbuf.batch_start_offset = 0;
        execbuf.batch_len = used;
        execbuf.cliprects_ptr = (uintptr_t)cliprects;
        execbuf.num_cliprects = num_cliprects;
        execbuf.DR1 = 0;
        execbuf.DR4 = DR4;
        execbuf.flags = flags;
        execbuf.rsvd1 = 0;
        execbuf.rsvd2 = 0;

        if (bufmgr_gem->no_exec)
                goto skip_execution;

        ret = drmIoctl(bufmgr_gem->fd,
                       DRM_IOCTL_I915_GEM_EXECBUFFER2,
                       &execbuf);
        if (ret != 0) {
                ret = -errno;
                if (ret == -ENOSPC) {
                        DBG("Execbuffer fails to pin. "
                            "Estimate: %u. Actual: %u. Available: %u\n",
                            drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
                                                               bufmgr_gem->exec_count),
                            drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
                                                              bufmgr_gem->exec_count),
                            (unsigned int) bufmgr_gem->gtt_size);
                }
        }
        drm_intel_update_buffer_offsets2(bufmgr_gem);

skip_execution:
        if (bufmgr_gem->bufmgr.debug)
                drm_intel_gem_dump_validation_list(bufmgr_gem);

        for (i = 0; i < bufmgr_gem->exec_count; i++) {
                drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
                drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

                /* Disconnect the buffer from the validate list */
                bo_gem->validate_index = -1;
                bufmgr_gem->exec_bos[i] = NULL;
        }
        bufmgr_gem->exec_count = 0;
        pthread_mutex_unlock(&bufmgr_gem->lock);

        return ret;
}

static int
drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
                       drm_clip_rect_t *cliprects, int num_cliprects,
                       int DR4)
{
        return drm_intel_gem_bo_mrb_exec2(bo, used,
                                          cliprects, num_cliprects, DR4,
                                          I915_EXEC_RENDER);
}

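/* Usage sketch (illustrative, not part of the original file): submitting a
 * batch through the public wrappers for the exec paths above.  "used" is the
 * number of bytes of batch actually emitted, which must end in
 * MI_BATCH_BUFFER_END.  drm_intel_bo_mrb_exec() is the intel_bufmgr.h wrapper
 * for selecting a ring.
 */
#if 0
ret = drm_intel_bo_exec(batch_bo, used, NULL, 0, 0);
/* or, to target the BLT ring on chips that have it: */
ret = drm_intel_bo_mrb_exec(batch_bo, used, NULL, 0, 0, I915_EXEC_BLT);
#endif
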
static int
drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        struct drm_i915_gem_pin pin;
        int ret;

        VG_CLEAR(pin);
        pin.handle = bo_gem->gem_handle;
        pin.alignment = alignment;

        ret = drmIoctl(bufmgr_gem->fd,
                       DRM_IOCTL_I915_GEM_PIN,
                       &pin);
        if (ret != 0)
                return -errno;

        bo->offset = pin.offset;
        return 0;
}

static int
drm_intel_gem_bo_unpin(drm_intel_bo *bo)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        struct drm_i915_gem_unpin unpin;
        int ret;

        VG_CLEAR(unpin);
        unpin.handle = bo_gem->gem_handle;

        ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
        if (ret != 0)
                return -errno;

        return 0;
}

static int
drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
                                     uint32_t tiling_mode,
                                     uint32_t stride)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        struct drm_i915_gem_set_tiling set_tiling;
        int ret;

        if (bo_gem->global_name == 0 &&
            tiling_mode == bo_gem->tiling_mode &&
            stride == bo_gem->stride)
                return 0;

        memset(&set_tiling, 0, sizeof(set_tiling));
        do {
                /* set_tiling is slightly broken and overwrites the
                 * input on the error path, so we have to open code
                 * drmIoctl.
                 */
                set_tiling.handle = bo_gem->gem_handle;
                set_tiling.tiling_mode = tiling_mode;
                set_tiling.stride = stride;

                ret = ioctl(bufmgr_gem->fd,
                            DRM_IOCTL_I915_GEM_SET_TILING,
                            &set_tiling);
        } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
        if (ret == -1)
                return -errno;

        bo_gem->tiling_mode = set_tiling.tiling_mode;
        bo_gem->swizzle_mode = set_tiling.swizzle_mode;
        bo_gem->stride = set_tiling.stride;
        return 0;
}

static int
drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
                            uint32_t stride)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        int ret;

        /* Linear buffers have no stride.  By ensuring that we only ever use
         * stride 0 with linear buffers, we simplify our code.
         */
        if (*tiling_mode == I915_TILING_NONE)
                stride = 0;

        ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
        if (ret == 0)
                drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

        *tiling_mode = bo_gem->tiling_mode;
        return ret;
}

static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
                            uint32_t *swizzle_mode)
{
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

        *tiling_mode = bo_gem->tiling_mode;
        *swizzle_mode = bo_gem->swizzle_mode;
        return 0;
}

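/* Usage sketch (illustrative, not part of the original file): changing a BO's
 * tiling after allocation and reading back what the kernel actually granted.
 * "pitch" is a hypothetical stride chosen by the caller.
 */
#if 0
uint32_t tiling = I915_TILING_X, swizzle;

drm_intel_bo_set_tiling(bo, &tiling, pitch);
drm_intel_bo_get_tiling(bo, &tiling, &swizzle);
/* tiling may have been left at I915_TILING_NONE by the kernel */
#endif
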
static int
drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t *name)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        int ret;

        if (!bo_gem->global_name) {
                struct drm_gem_flink flink;

                VG_CLEAR(flink);
                flink.handle = bo_gem->gem_handle;

                ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
                if (ret != 0)
                        return -errno;

                bo_gem->global_name = flink.name;
                bo_gem->reusable = false;

                DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
        }

        *name = bo_gem->global_name;
        return 0;
}

/**
 * Enables unlimited caching of buffer objects for reuse.
 *
 * This is potentially very memory expensive, as the cache at each bucket
 * size is only bounded by how many buffers of that size we've managed to have
 * in flight at once.
 */
void
drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;

        bufmgr_gem->bo_reuse = true;
}

/**
 * Enable use of fenced reloc type.
 *
 * New code should enable this to avoid unnecessary fence register
 * allocation.  If this option is not enabled, all relocs will have a fence
 * register allocated.
 */
void
drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;

        if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
                bufmgr_gem->fenced_relocs = true;
}

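/* Usage sketch (illustrative, not part of the original file): a client on the
 * execbuf2 path opts in to fenced relocs, so that fence registers are only
 * reserved for relocations emitted with drm_intel_bo_emit_reloc_fence().
 */
#if 0
drm_intel_bufmgr_gem_enable_fenced_relocs(bufmgr);
#endif
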
/**
 * Return the additional aperture space required by the tree of buffer objects
 * rooted at bo.
 */
static int
drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
{
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        int i;
        int total = 0;

        if (bo == NULL || bo_gem->included_in_check_aperture)
                return 0;

        total += bo->size;
        bo_gem->included_in_check_aperture = true;

        for (i = 0; i < bo_gem->reloc_count; i++)
                total +=
                    drm_intel_gem_bo_get_aperture_space(bo_gem->
                                                        reloc_target_info[i].bo);

        return total;
}

/**
 * Count the number of buffers in this list that need a fence reg
 *
 * If the count is greater than the number of available regs, we'll have
 * to ask the caller to resubmit a batch with fewer tiled buffers.
 *
 * This function over-counts if the same buffer is used multiple times.
 */
static unsigned int
drm_intel_gem_total_fences(drm_intel_bo **bo_array, int count)
{
        int i;
        unsigned int total = 0;

        for (i = 0; i < count; i++) {
                drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];

                if (bo_gem == NULL)
                        continue;

                total += bo_gem->reloc_tree_fences;
        }
        return total;
}

/**
 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
 * for the next drm_intel_bufmgr_check_aperture_space() call.
 */
static void
drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
{
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        int i;

        if (bo == NULL || !bo_gem->included_in_check_aperture)
                return;

        bo_gem->included_in_check_aperture = false;

        for (i = 0; i < bo_gem->reloc_count; i++)
                drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
                                                           reloc_target_info[i].bo);
}

/**
 * Return a conservative estimate for the amount of aperture required
 * for a collection of buffers.  This may double-count some buffers.
 */
static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
{
        int i;
        unsigned int total = 0;

        for (i = 0; i < count; i++) {
                drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];

                if (bo_gem != NULL)
                        total += bo_gem->reloc_tree_size;
        }
        return total;
}

/**
 * Return the amount of aperture needed for a collection of buffers.
 * This avoids double counting any buffers, at the cost of looking
 * at every buffer in the set.
 */
static unsigned int
drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
{
        int i;
        unsigned int total = 0;

        for (i = 0; i < count; i++) {
                total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
                /* For the first buffer object in the array, we get an
                 * accurate count back for its reloc_tree size (since nothing
                 * had been flagged as being counted yet).  We can save that
                 * value out as a more conservative reloc_tree_size that
                 * avoids double-counting target buffers.  Since the first
                 * buffer happens to usually be the batch buffer in our
                 * callers, this can pull us back from doing the tree
                 * walk on every new batch emit.
                 */
                if (i == 0) {
                        drm_intel_bo_gem *bo_gem =
                            (drm_intel_bo_gem *) bo_array[i];
                        bo_gem->reloc_tree_size = total;
                }
        }

        for (i = 0; i < count; i++)
                drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
        return total;
}

/**
 * Return -1 if the batchbuffer should be flushed before attempting to
 * emit rendering referencing the buffers pointed to by bo_array.
 *
 * This is required because if we try to emit a batchbuffer with relocations
 * to a tree of buffers that won't simultaneously fit in the aperture,
 * the rendering will return an error at a point where the software is not
 * prepared to recover from it.
 *
 * However, we also want to emit the batchbuffer significantly before we reach
 * the limit, as a series of batchbuffers each of which references buffers
 * covering almost all of the aperture means that at each emit we end up
 * waiting to evict a buffer from the last rendering, and rendering becomes
 * effectively synchronous.  By emitting smaller batchbuffers, we eat some
 * CPU overhead to get better parallelism.
 */
static int
drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
{
        drm_intel_bufmgr_gem *bufmgr_gem =
            (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
        unsigned int total = 0;
        unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
        int total_fences;

        /* Check for fence reg constraints if necessary */
        if (bufmgr_gem->available_fences) {
                total_fences = drm_intel_gem_total_fences(bo_array, count);
                if (total_fences > bufmgr_gem->available_fences)
                        return -ENOSPC;
        }

        total = drm_intel_gem_estimate_batch_space(bo_array, count);

        if (total > threshold)
                total = drm_intel_gem_compute_batch_space(bo_array, count);

        if (total > threshold) {
                DBG("check_space: overflowed available aperture, "
                    "%dkb vs %dkb\n",
                    total / 1024, (int)bufmgr_gem->gtt_size / 1024);
                return -ENOSPC;
        } else {
                DBG("drm_check_space: total %dkb vs bufmgr %dkb\n", total / 1024,
                    (int)bufmgr_gem->gtt_size / 1024);
                return 0;
        }
}

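/* Usage sketch (illustrative, not part of the original file): a client checks
 * the aperture before adding more state to a batch, and flushes first if the
 * check fails.  flush_batch() stands in for the client's own batch submission.
 */
#if 0
drm_intel_bo *bos[] = { batch_bo, texture_bo, render_target_bo };

if (drm_intel_bufmgr_check_aperture_space(bos, 3) != 0) {
        /* Won't fit alongside the current batch: submit and retry. */
        flush_batch();
}
#endif
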
/*
 * Disable buffer reuse for objects which are shared with the kernel
 * as scanout buffers
 */
int
drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
{
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

        bo_gem->reusable = false;
        return 0;
}

int
drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
{
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

        return bo_gem->reusable;
}

static int
_drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        int i;

        for (i = 0; i < bo_gem->reloc_count; i++) {
                if (bo_gem->reloc_target_info[i].bo == target_bo)
                        return 1;
                if (bo == bo_gem->reloc_target_info[i].bo)
                        continue;
                if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
                                                 target_bo))
                        return 1;
        }

        return 0;
}

/** Return true if target_bo is referenced by bo's relocation tree. */
static int
drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
        drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;

        if (bo == NULL || target_bo == NULL)
                return 0;
        if (target_bo_gem->used_as_reloc_target)
                return _drm_intel_gem_bo_references(bo, target_bo);
        return 0;
}

2275 add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
2277 unsigned int i = bufmgr_gem->num_buckets;
2279 assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));
2281 DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
2282 bufmgr_gem->cache_bucket[i].size = size;
2283 bufmgr_gem->num_buckets++;
static void
init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
{
	unsigned long size, cache_max_size = 64 * 1024 * 1024;

	/* Pure power-of-two buckets proved too wasteful of memory, so add
	 * three intermediate sizes between each power of two, which should
	 * cover allocation sizes accurately enough.  (The alternative is
	 * probably to just go for exact matching of sizes, and assume that
	 * for things like composited window resize the tiled width/height
	 * alignment and rounding of sizes to pages will get us useful cache
	 * hit rates anyway.)
	 */
	add_bucket(bufmgr_gem, 4096);
	add_bucket(bufmgr_gem, 4096 * 2);
	add_bucket(bufmgr_gem, 4096 * 3);

	/* Initialize the linked lists for BO reuse cache. */
	for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
		add_bucket(bufmgr_gem, size);

		add_bucket(bufmgr_gem, size + size * 1 / 4);
		add_bucket(bufmgr_gem, size + size * 2 / 4);
		add_bucket(bufmgr_gem, size + size * 3 / 4);
	}
}
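/*
 * Worked example of the bucket spacing above (editorial note): on the
 * size = 1MB iteration the loop adds buckets of 1MB, 1.25MB, 1.5MB and
 * 1.75MB, so e.g. a 1.3MB allocation is rounded up to the 1.5MB bucket
 * instead of to the next power of two (2MB); the worst-case overallocation
 * is therefore about 25% rather than nearly 100%.
 */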
void
drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

	bufmgr_gem->vma_max = limit;

	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}
/**
 * Get the PCI ID for the device.  This can be overridden by setting the
 * INTEL_DEVID_OVERRIDE environment variable to the desired ID.
 */
static int
get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
{
	char *devid_override;
	int devid = 0;
	int ret;
	drm_i915_getparam_t gp;

	if (geteuid() == getuid()) {
		devid_override = getenv("INTEL_DEVID_OVERRIDE");
		if (devid_override) {
			bufmgr_gem->no_exec = true;
			/* strtol with base 0 accepts both decimal and
			 * 0x-prefixed hex IDs; strtod only handled hex by
			 * accident, via C99 hex-float parsing.
			 */
			return strtol(devid_override, NULL, 0);
		}
	}

	VG_CLEAR(gp);
	gp.param = I915_PARAM_CHIPSET_ID;
	gp.value = &devid;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret) {
		fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
		fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
	}

	return devid;
}
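/*
 * Usage note (editorial, not part of the original source): the override is
 * mainly useful for exercising code paths for other chipsets without the
 * hardware; no_exec is set so nothing is actually submitted to the GPU.
 * The PCI ID below is just an example (a SandyBridge GT2 part):
 *
 *	$ INTEL_DEVID_OVERRIDE=0x0112 ./some_test
 *
 * The geteuid() == getuid() check above deliberately ignores the override
 * in setuid binaries.
 */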
int
drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

	return bufmgr_gem->pci_device;
}
/**
 * Initializes the GEM buffer manager, which uses the kernel to allocate,
 * map, and manage buffer objects.
 *
 * \param fd File descriptor of the opened DRM device.
 * \param batch_size Expected size, in bytes, of the client's batchbuffers
 * (used to size the per-buffer relocation arrays).
 */
drm_intel_bufmgr *
drm_intel_bufmgr_gem_init(int fd, int batch_size)
{
	drm_intel_bufmgr_gem *bufmgr_gem;
	struct drm_i915_gem_get_aperture aperture;
	drm_i915_getparam_t gp;
	int ret, tmp;
	bool exec2 = false;

	bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
	if (bufmgr_gem == NULL)
		return NULL;

	bufmgr_gem->fd = fd;

	if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
		free(bufmgr_gem);
		return NULL;
	}
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_GET_APERTURE,
		       &aperture);

	if (ret == 0)
		bufmgr_gem->gtt_size = aperture.aper_available_size;
	else {
		fprintf(stderr, "DRM_IOCTL_I915_GEM_GET_APERTURE failed: %s\n",
			strerror(errno));
		bufmgr_gem->gtt_size = 128 * 1024 * 1024;
		fprintf(stderr, "Assuming %dkB available aperture size.\n"
			"May lead to reduced performance or incorrect "
			"rendering.\n",
			(int)bufmgr_gem->gtt_size / 1024);
	}
	bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);

	if (IS_GEN2(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 2;
	else if (IS_GEN3(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 3;
	else if (IS_GEN4(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 4;
	else if (IS_GEN5(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 5;
	else if (IS_GEN6(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 6;
	else if (IS_GEN7(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 7;
	else
		assert(0);

	if (IS_GEN3(bufmgr_gem->pci_device) &&
	    bufmgr_gem->gtt_size > 256*1024*1024) {
		/* The unmappable part of the GTT on gen3 (i.e. above 256MB)
		 * can't be used for tiled blits.  To simplify the accounting,
		 * just subtract the unmappable part (fixed to 256MB on all
		 * known gen3 devices) if the kernel advertises it. */
		bufmgr_gem->gtt_size -= 256*1024*1024;
	}
	VG_CLEAR(gp);
	gp.value = &tmp;

	gp.param = I915_PARAM_HAS_EXECBUF2;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (!ret)
		exec2 = true;
	gp.param = I915_PARAM_HAS_BSD;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_bsd = ret == 0;

	gp.param = I915_PARAM_HAS_BLT;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_blt = ret == 0;

	gp.param = I915_PARAM_HAS_RELAXED_FENCING;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_relaxed_fencing = ret == 0;

	gp.param = I915_PARAM_HAS_LLC;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret != 0) {
		/* The kernel does not support the HAS_LLC query; fall back
		 * to GPU generation detection and assume we have LLC on
		 * GEN6/7.  (drmIoctl() returns -1 with errno set, so testing
		 * for a literal -EINVAL here would never match.)
		 */
		bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) ||
				       IS_GEN7(bufmgr_gem->pci_device));
	} else
		bufmgr_gem->has_llc = *gp.value;
	if (bufmgr_gem->gen < 4) {
		gp.param = I915_PARAM_NUM_FENCES_AVAIL;
		gp.value = &bufmgr_gem->available_fences;
		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
		if (ret) {
			fprintf(stderr, "get fences failed: %d [%d]\n", ret,
				errno);
			fprintf(stderr, "param: %d, val: %d\n", gp.param,
				*gp.value);
			bufmgr_gem->available_fences = 0;
		} else {
			/* XXX The kernel reports the total number of fences,
			 * including any that may be pinned.
			 *
			 * We presume that there will be at least one pinned
			 * fence for the scanout buffer, but there may be more
			 * than one scanout and the user may be manually
			 * pinning buffers.  Let's move to execbuffer2 and
			 * thereby forget the insanity of using fences...
			 */
			bufmgr_gem->available_fences -= 2;
			if (bufmgr_gem->available_fences < 0)
				bufmgr_gem->available_fences = 0;
		}
	}
	/* Let's go with one relocation per every 2 dwords (but round down a
	 * bit, since a power of two would mean an extra page allocation for
	 * the reloc buffer).
	 *
	 * Every 4 was too few for the blender benchmark.
	 */
	bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
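	/* Worked example (editorial note): for a typical 16384-byte batch
	 * this yields 16384 / 4 / 2 - 2 = 2046 relocation slots, just under
	 * the power of two (2048) that would have cost an extra page for
	 * the relocation array.
	 */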
	bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
	bufmgr_gem->bufmgr.bo_alloc_for_render =
	    drm_intel_gem_bo_alloc_for_render;
	bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
	bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
	bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
	bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
	bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
	bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
	bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
	bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
	bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
	bufmgr_gem->bufmgr.bo_emit_reloc_fence =
	    drm_intel_gem_bo_emit_reloc_fence;
	bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
	bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
	bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
	bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
	bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
	/* Use the new execbuffer2 path if the kernel supports it. */
	if (exec2) {
		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
		bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
	} else
		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
	bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
	bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
	bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
	bufmgr_gem->bufmgr.debug = 0;
	bufmgr_gem->bufmgr.check_aperture_space =
	    drm_intel_gem_check_aperture_space;
	bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
	bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
	bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
	    drm_intel_gem_get_pipe_from_crtc_id;
	bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
	DRMINITLISTHEAD(&bufmgr_gem->named);
	init_cache_buckets(bufmgr_gem);

	DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
	bufmgr_gem->vma_max = -1; /* unlimited by default */

	return &bufmgr_gem->bufmgr;
}
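/*
 * Illustrative sketch (editorial, not part of the original source): typical
 * client setup.  The device path and batch size are examples only; headers
 * (fcntl.h etc.) are elided:
 *
 *	int fd = open("/dev/dri/card0", O_RDWR);
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096 * 4);
 *
 *	if (bufmgr != NULL)
 *		drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 */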