1 /**************************************************************************
3 * Copyright © 2007 Red Hat Inc.
4 * Copyright © 2007-2012 Intel Corporation
5 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions
29 **************************************************************************/
31 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
32 * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
33 * Eric Anholt <eric@anholt.net>
34 * Dave Airlie <airlied@linux.ie>
42 #include <xf86atomic.h>
50 #include <sys/ioctl.h>
53 #include <sys/types.h>
57 #include "libdrm_lists.h"
58 #include "intel_bufmgr.h"
59 #include "intel_bufmgr_priv.h"
60 #include "intel_chipset.h"
61 #include "intel_aub.h"
74 #define VG_CLEAR(s) VG(memset(&s, 0, sizeof(s)))
76 #define DBG(...) do { \
77 if (bufmgr_gem->bufmgr.debug) \
78 fprintf(stderr, __VA_ARGS__); \
81 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
83 typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
85 struct drm_intel_gem_bo_bucket {
90 typedef struct _drm_intel_bufmgr_gem {
91 drm_intel_bufmgr bufmgr;
99 struct drm_i915_gem_exec_object *exec_objects;
100 struct drm_i915_gem_exec_object2 *exec2_objects;
101 drm_intel_bo **exec_bos;
105 /** Array of lists of cached gem objects of power-of-two sizes */
106 struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
111 drmMMListHead vma_cache;
112 int vma_count, vma_open, vma_max;
115 int available_fences;
118 unsigned int has_bsd : 1;
119 unsigned int has_blt : 1;
120 unsigned int has_relaxed_fencing : 1;
121 unsigned int has_llc : 1;
122 unsigned int has_wait_timeout : 1;
123 unsigned int bo_reuse : 1;
124 unsigned int no_exec : 1;
129 } drm_intel_bufmgr_gem;
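/*
 * Illustrative sketch (not part of the original source): this structure is
 * created by the library entry point, typically right after opening the
 * DRM device, assuming the public API declared in intel_bufmgr.h.
 *
 *	int fd = drmOpen("i915", NULL);
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
 *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 */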
131 #define DRM_INTEL_RELOC_FENCE (1<<0)
133 typedef struct _drm_intel_reloc_target_info {
136 } drm_intel_reloc_target;
138 struct _drm_intel_bo_gem {
146	 * Kernel-assigned global name for this object
148 unsigned int global_name;
149 drmMMListHead name_list;
152 * Index of the buffer within the validation list while preparing a
153 * batchbuffer execution.
158 * Current tiling mode
160 uint32_t tiling_mode;
161 uint32_t swizzle_mode;
162 unsigned long stride;
166 /** Array passed to the DRM containing relocation information. */
167 struct drm_i915_gem_relocation_entry *relocs;
169 * Array of info structs corresponding to relocs[i].target_handle etc
171 drm_intel_reloc_target *reloc_target_info;
172 /** Number of entries in relocs */
174 /** Mapped address for the buffer, saved across map/unmap cycles */
176 /** GTT virtual address for the buffer, saved across map/unmap cycles */
179 drmMMListHead vma_list;
185 * Boolean of whether this BO and its children have been included in
186 * the current drm_intel_bufmgr_check_aperture_space() total.
188 bool included_in_check_aperture;
191 * Boolean of whether this buffer has been used as a relocation
192 * target and had its size accounted for, and thus can't have any
193 * further relocations added to it.
195 bool used_as_reloc_target;
198 * Boolean of whether we have encountered an error whilst building the relocation tree.
203 * Boolean of whether this buffer can be re-used
208 * Size in bytes of this buffer and its relocation descendents.
210 * Used to avoid costly tree walking in
211 * drm_intel_bufmgr_check_aperture in the common case.
216 * Number of potential fence registers required by this buffer and its
219 int reloc_tree_fences;
221	/** Whether we may need to do the SW_FINISH ioctl on unmap. */
222 bool mapped_cpu_write;
226 drm_intel_aub_annotation *aub_annotations;
227 unsigned aub_annotation_count;
231 drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);
234 drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);
237 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
238 uint32_t * swizzle_mode);
241 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
242 uint32_t tiling_mode,
245 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
248 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);
250 static void drm_intel_gem_bo_free(drm_intel_bo *bo);
253 drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
254 uint32_t *tiling_mode)
256 unsigned long min_size, max_size;
259 if (*tiling_mode == I915_TILING_NONE)
262 /* 965+ just need multiples of page size for tiling */
263 if (bufmgr_gem->gen >= 4)
264 return ROUND_UP_TO(size, 4096);
266 /* Older chips need powers of two, of at least 512k or 1M */
267 if (bufmgr_gem->gen == 3) {
268 min_size = 1024*1024;
269 max_size = 128*1024*1024;
272 max_size = 64*1024*1024;
275 if (size > max_size) {
276 *tiling_mode = I915_TILING_NONE;
280 /* Do we need to allocate every page for the fence? */
281 if (bufmgr_gem->has_relaxed_fencing)
282 return ROUND_UP_TO(size, 4096);
284 for (i = min_size; i < size; i <<= 1)
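/*
 * Worked example (added for illustration, not in the original code),
 * following the rounding rules above: a 150000-byte X-tiled request rounds
 * up to the next page on gen4+ (37 * 4096 = 151552 bytes), while a gen2
 * part without relaxed fencing rounds up to the next power of two of at
 * least 512KB, i.e. 524288 bytes.
 *
 *	uint32_t tiling = I915_TILING_X;
 *	unsigned long sz =
 *		drm_intel_gem_bo_tile_size(bufmgr_gem, 150000, &tiling);
 */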
291 * Round a given pitch up to the minimum required for X tiling on a
292 * given chip. We use 512 as the minimum to allow for a later tiling
296 drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
297 unsigned long pitch, uint32_t *tiling_mode)
299 unsigned long tile_width;
302 /* If untiled, then just align it so that we can do rendering
303 * to it with the 3D engine.
305 if (*tiling_mode == I915_TILING_NONE)
306 return ALIGN(pitch, 64);
308 if (*tiling_mode == I915_TILING_X
309 || (IS_915(bufmgr_gem->pci_device)
310 && *tiling_mode == I915_TILING_Y))
315 /* 965 is flexible */
316 if (bufmgr_gem->gen >= 4)
317 return ROUND_UP_TO(pitch, tile_width);
319 /* The older hardware has a maximum pitch of 8192 with tiled
320 * surfaces, so fallback to untiled if it's too large.
323 *tiling_mode = I915_TILING_NONE;
324 return ALIGN(pitch, 64);
327 /* Pre-965 needs power of two tile width */
328 for (i = tile_width; i < pitch; i <<= 1)
334 static struct drm_intel_gem_bo_bucket *
335 drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
340 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
341 struct drm_intel_gem_bo_bucket *bucket =
342 &bufmgr_gem->cache_bucket[i];
343 if (bucket->size >= size) {
352 drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
356 for (i = 0; i < bufmgr_gem->exec_count; i++) {
357 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
358 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
360 if (bo_gem->relocs == NULL) {
361 DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
366 for (j = 0; j < bo_gem->reloc_count; j++) {
367 drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
368 drm_intel_bo_gem *target_gem =
369 (drm_intel_bo_gem *) target_bo;
371 DBG("%2d: %d (%s)@0x%08llx -> "
372 "%d (%s)@0x%08lx + 0x%08x\n",
374 bo_gem->gem_handle, bo_gem->name,
375 (unsigned long long)bo_gem->relocs[j].offset,
376 target_gem->gem_handle,
379 bo_gem->relocs[j].delta);
385 drm_intel_gem_bo_reference(drm_intel_bo *bo)
387 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
389 atomic_inc(&bo_gem->refcount);
393 * Adds the given buffer to the list of buffers to be validated (moved into the
394 * appropriate memory type) with the next batch submission.
396 * If a buffer is validated multiple times in a batch submission, it ends up
397 * with the intersection of the memory type flags and the union of the
401 drm_intel_add_validate_buffer(drm_intel_bo *bo)
403 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
404 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
407 if (bo_gem->validate_index != -1)
410 /* Extend the array of validation entries as necessary. */
411 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
412 int new_size = bufmgr_gem->exec_size * 2;
417 bufmgr_gem->exec_objects =
418 realloc(bufmgr_gem->exec_objects,
419 sizeof(*bufmgr_gem->exec_objects) * new_size);
420 bufmgr_gem->exec_bos =
421 realloc(bufmgr_gem->exec_bos,
422 sizeof(*bufmgr_gem->exec_bos) * new_size);
423 bufmgr_gem->exec_size = new_size;
426 index = bufmgr_gem->exec_count;
427 bo_gem->validate_index = index;
428 /* Fill in array entry */
429 bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
430 bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
431 bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
432 bufmgr_gem->exec_objects[index].alignment = 0;
433 bufmgr_gem->exec_objects[index].offset = 0;
434 bufmgr_gem->exec_bos[index] = bo;
435 bufmgr_gem->exec_count++;
439 drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
441 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
442 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
445 if (bo_gem->validate_index != -1) {
447 bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |=
448 EXEC_OBJECT_NEEDS_FENCE;
452 /* Extend the array of validation entries as necessary. */
453 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
454 int new_size = bufmgr_gem->exec_size * 2;
459 bufmgr_gem->exec2_objects =
460 realloc(bufmgr_gem->exec2_objects,
461 sizeof(*bufmgr_gem->exec2_objects) * new_size);
462 bufmgr_gem->exec_bos =
463 realloc(bufmgr_gem->exec_bos,
464 sizeof(*bufmgr_gem->exec_bos) * new_size);
465 bufmgr_gem->exec_size = new_size;
468 index = bufmgr_gem->exec_count;
469 bo_gem->validate_index = index;
470 /* Fill in array entry */
471 bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
472 bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
473 bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
474 bufmgr_gem->exec2_objects[index].alignment = 0;
475 bufmgr_gem->exec2_objects[index].offset = 0;
476 bufmgr_gem->exec_bos[index] = bo;
477 bufmgr_gem->exec2_objects[index].flags = 0;
478 bufmgr_gem->exec2_objects[index].rsvd1 = 0;
479 bufmgr_gem->exec2_objects[index].rsvd2 = 0;
481 bufmgr_gem->exec2_objects[index].flags |=
482 EXEC_OBJECT_NEEDS_FENCE;
484 bufmgr_gem->exec_count++;
487 #define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
491 drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
492 drm_intel_bo_gem *bo_gem)
496 assert(!bo_gem->used_as_reloc_target);
498	/* The older chipsets are far less flexible in terms of tiling,
499	 * and require tiled buffers to be size-aligned in the aperture.
500 * This means that in the worst possible case we will need a hole
501 * twice as large as the object in order for it to fit into the
502 * aperture. Optimal packing is for wimps.
504 size = bo_gem->bo.size;
505 if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
508 if (bufmgr_gem->has_relaxed_fencing) {
509 if (bufmgr_gem->gen == 3)
510 min_size = 1024*1024;
514 while (min_size < size)
519 /* Account for worst-case alignment. */
523 bo_gem->reloc_tree_size = size;
527 drm_intel_setup_reloc_list(drm_intel_bo *bo)
529 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
530 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
531 unsigned int max_relocs = bufmgr_gem->max_relocs;
533 if (bo->size / 4 < max_relocs)
534 max_relocs = bo->size / 4;
536 bo_gem->relocs = malloc(max_relocs *
537 sizeof(struct drm_i915_gem_relocation_entry));
538 bo_gem->reloc_target_info = malloc(max_relocs *
539 sizeof(drm_intel_reloc_target));
540 if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
541 bo_gem->has_error = true;
543 free (bo_gem->relocs);
544 bo_gem->relocs = NULL;
546 free (bo_gem->reloc_target_info);
547 bo_gem->reloc_target_info = NULL;
556 drm_intel_gem_bo_busy(drm_intel_bo *bo)
558 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
559 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
560 struct drm_i915_gem_busy busy;
564 busy.handle = bo_gem->gem_handle;
566 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
568 return (ret == 0 && busy.busy);
572 drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
573 drm_intel_bo_gem *bo_gem, int state)
575 struct drm_i915_gem_madvise madv;
578 madv.handle = bo_gem->gem_handle;
581 drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
583 return madv.retained;
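/*
 * Illustrative sketch (added, not original): callers can use the public
 * drm_intel_bo_madvise() entry point, which lands in
 * drm_intel_gem_bo_madvise() below, to mark an idle buffer purgeable and
 * later find out whether the kernel kept its pages.
 *
 *	drm_intel_bo_madvise(bo, I915_MADV_DONTNEED);
 *	...
 *	retained = drm_intel_bo_madvise(bo, I915_MADV_WILLNEED);
 *
 * A zero return from the WILLNEED call means the pages were discarded and
 * the contents must be regenerated.
 */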
587 drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
589 return drm_intel_gem_bo_madvise_internal
590 ((drm_intel_bufmgr_gem *) bo->bufmgr,
591 (drm_intel_bo_gem *) bo,
595 /* drop the oldest entries that have been purged by the kernel */
597 drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
598 struct drm_intel_gem_bo_bucket *bucket)
600 while (!DRMLISTEMPTY(&bucket->head)) {
601 drm_intel_bo_gem *bo_gem;
603 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
604 bucket->head.next, head);
605 if (drm_intel_gem_bo_madvise_internal
606 (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
609 DRMLISTDEL(&bo_gem->head);
610 drm_intel_gem_bo_free(&bo_gem->bo);
614 static drm_intel_bo *
615 drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
619 uint32_t tiling_mode,
620 unsigned long stride)
622 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
623 drm_intel_bo_gem *bo_gem;
624 unsigned int page_size = getpagesize();
626 struct drm_intel_gem_bo_bucket *bucket;
627 bool alloc_from_cache;
628 unsigned long bo_size;
629 bool for_render = false;
631 if (flags & BO_ALLOC_FOR_RENDER)
634 /* Round the allocated size up to a power of two number of pages. */
635 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
637 /* If we don't have caching at this size, don't actually round the
640 if (bucket == NULL) {
642 if (bo_size < page_size)
645 bo_size = bucket->size;
648 pthread_mutex_lock(&bufmgr_gem->lock);
649 /* Get a buffer out of the cache if available */
651 alloc_from_cache = false;
652 if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
654 /* Allocate new render-target BOs from the tail (MRU)
655 * of the list, as it will likely be hot in the GPU
656 * cache and in the aperture for us.
658 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
659 bucket->head.prev, head);
660 DRMLISTDEL(&bo_gem->head);
661 alloc_from_cache = true;
663 /* For non-render-target BOs (where we're probably
664 * going to map it first thing in order to fill it
665 * with data), check if the last BO in the cache is
666 * unbusy, and only reuse in that case. Otherwise,
667 * allocating a new buffer is probably faster than
668 * waiting for the GPU to finish.
670 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
671 bucket->head.next, head);
672 if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
673 alloc_from_cache = true;
674 DRMLISTDEL(&bo_gem->head);
678 if (alloc_from_cache) {
679 if (!drm_intel_gem_bo_madvise_internal
680 (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
681 drm_intel_gem_bo_free(&bo_gem->bo);
682 drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
687 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
690 drm_intel_gem_bo_free(&bo_gem->bo);
695 pthread_mutex_unlock(&bufmgr_gem->lock);
697 if (!alloc_from_cache) {
698 struct drm_i915_gem_create create;
700 bo_gem = calloc(1, sizeof(*bo_gem));
704 bo_gem->bo.size = bo_size;
707 create.size = bo_size;
709 ret = drmIoctl(bufmgr_gem->fd,
710 DRM_IOCTL_I915_GEM_CREATE,
712 bo_gem->gem_handle = create.handle;
713 bo_gem->bo.handle = bo_gem->gem_handle;
718 bo_gem->bo.bufmgr = bufmgr;
720 bo_gem->tiling_mode = I915_TILING_NONE;
721 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
724 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
727 drm_intel_gem_bo_free(&bo_gem->bo);
731 DRMINITLISTHEAD(&bo_gem->name_list);
732 DRMINITLISTHEAD(&bo_gem->vma_list);
736 atomic_set(&bo_gem->refcount, 1);
737 bo_gem->validate_index = -1;
738 bo_gem->reloc_tree_fences = 0;
739 bo_gem->used_as_reloc_target = false;
740 bo_gem->has_error = false;
741 bo_gem->reusable = true;
742 bo_gem->aub_annotations = NULL;
743 bo_gem->aub_annotation_count = 0;
745 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
747 DBG("bo_create: buf %d (%s) %ldb\n",
748 bo_gem->gem_handle, bo_gem->name, size);
753 static drm_intel_bo *
754 drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
757 unsigned int alignment)
759 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
761 I915_TILING_NONE, 0);
764 static drm_intel_bo *
765 drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
768 unsigned int alignment)
770 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
771 I915_TILING_NONE, 0);
774 static drm_intel_bo *
775 drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
776 int x, int y, int cpp, uint32_t *tiling_mode,
777 unsigned long *pitch, unsigned long flags)
779 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
780 unsigned long size, stride;
784 unsigned long aligned_y, height_alignment;
786 tiling = *tiling_mode;
788 /* If we're tiled, our allocations are in 8 or 32-row blocks,
789 * so failure to align our height means that we won't allocate
792 * If we're untiled, we still have to align to 2 rows high
793 * because the data port accesses 2x2 blocks even if the
794 * bottom row isn't to be rendered, so failure to align means
795 * we could walk off the end of the GTT and fault. This is
796 * documented on 965, and may be the case on older chipsets
797 * too so we try to be careful.
800 height_alignment = 2;
802 if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
803 height_alignment = 16;
804 else if (tiling == I915_TILING_X
805 || (IS_915(bufmgr_gem->pci_device)
806 && tiling == I915_TILING_Y))
807 height_alignment = 8;
808 else if (tiling == I915_TILING_Y)
809 height_alignment = 32;
810 aligned_y = ALIGN(y, height_alignment);
813 stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
814 size = stride * aligned_y;
815 size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
816 } while (*tiling_mode != tiling);
819 if (tiling == I915_TILING_NONE)
822 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
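/*
 * Illustrative usage sketch (not part of the original file), assuming the
 * public wrapper declared in intel_bufmgr.h: allocate an X-tiled 1024x768
 * 32bpp surface and let the library pick a legal pitch and total size.
 *
 *	uint32_t tiling = I915_TILING_X;
 *	unsigned long pitch;
 *	drm_intel_bo *bo =
 *		drm_intel_bo_alloc_tiled(bufmgr, "scanout", 1024, 768, 4,
 *					 &tiling, &pitch, 0);
 *
 * On return, tiling may have been demoted to I915_TILING_NONE if the pitch
 * or size was not representable on this chipset.
 */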
827 * Returns a drm_intel_bo wrapping the given buffer object handle.
829 * This can be used when one application needs to pass a buffer object
833 drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
837 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
838 drm_intel_bo_gem *bo_gem;
840 struct drm_gem_open open_arg;
841 struct drm_i915_gem_get_tiling get_tiling;
844	/* At the moment most applications only have a few named BOs.
845 * For instance, in a DRI client only the render buffers passed
846 * between X and the client are named. And since X returns the
847 * alternating names for the front/back buffer a linear search
848 * provides a sufficiently fast match.
850 for (list = bufmgr_gem->named.next;
851 list != &bufmgr_gem->named;
853 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
854 if (bo_gem->global_name == handle) {
855 drm_intel_gem_bo_reference(&bo_gem->bo);
860 bo_gem = calloc(1, sizeof(*bo_gem));
865 open_arg.name = handle;
866 ret = drmIoctl(bufmgr_gem->fd,
870 DBG("Couldn't reference %s handle 0x%08x: %s\n",
871 name, handle, strerror(errno));
875 bo_gem->bo.size = open_arg.size;
876 bo_gem->bo.offset = 0;
877 bo_gem->bo.virtual = NULL;
878 bo_gem->bo.bufmgr = bufmgr;
880 atomic_set(&bo_gem->refcount, 1);
881 bo_gem->validate_index = -1;
882 bo_gem->gem_handle = open_arg.handle;
883 bo_gem->bo.handle = open_arg.handle;
884 bo_gem->global_name = handle;
885 bo_gem->reusable = false;
887 VG_CLEAR(get_tiling);
888 get_tiling.handle = bo_gem->gem_handle;
889 ret = drmIoctl(bufmgr_gem->fd,
890 DRM_IOCTL_I915_GEM_GET_TILING,
893 drm_intel_gem_bo_unreference(&bo_gem->bo);
896 bo_gem->tiling_mode = get_tiling.tiling_mode;
897 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
898 /* XXX stride is unknown */
899 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
901 DRMINITLISTHEAD(&bo_gem->vma_list);
902 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
903 DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
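/*
 * Illustrative sketch (added for clarity, not in the original source): a
 * client that has received a flink name, for example from a DRI2 buffer
 * exchange, can wrap it in a drm_intel_bo with the function above.
 *
 *	drm_intel_bo *front =
 *		drm_intel_bo_gem_create_from_name(bufmgr, "front buffer", name);
 *
 * A NULL return means the name was stale or the GEM open ioctl failed.
 */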
909 drm_intel_gem_bo_free(drm_intel_bo *bo)
911 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
912 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
913 struct drm_gem_close close;
916 DRMLISTDEL(&bo_gem->vma_list);
917 if (bo_gem->mem_virtual) {
918 VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
919 munmap(bo_gem->mem_virtual, bo_gem->bo.size);
920 bufmgr_gem->vma_count--;
922 if (bo_gem->gtt_virtual) {
923 munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
924 bufmgr_gem->vma_count--;
927 /* Close this object */
929 close.handle = bo_gem->gem_handle;
930 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
932 DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
933 bo_gem->gem_handle, bo_gem->name, strerror(errno));
935 free(bo_gem->aub_annotations);
940 drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
943 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
945 if (bo_gem->mem_virtual)
946 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);
948 if (bo_gem->gtt_virtual)
949 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
953 /** Frees all cached buffers significantly older than @time. */
955 drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
959 if (bufmgr_gem->time == time)
962 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
963 struct drm_intel_gem_bo_bucket *bucket =
964 &bufmgr_gem->cache_bucket[i];
966 while (!DRMLISTEMPTY(&bucket->head)) {
967 drm_intel_bo_gem *bo_gem;
969 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
970 bucket->head.next, head);
971 if (time - bo_gem->free_time <= 1)
974 DRMLISTDEL(&bo_gem->head);
976 drm_intel_gem_bo_free(&bo_gem->bo);
980 bufmgr_gem->time = time;
983 static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
987 DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
988 bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);
990 if (bufmgr_gem->vma_max < 0)
993 /* We may need to evict a few entries in order to create new mmaps */
994 limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
998 while (bufmgr_gem->vma_count > limit) {
999 drm_intel_bo_gem *bo_gem;
1001 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1002 bufmgr_gem->vma_cache.next,
1004 assert(bo_gem->map_count == 0);
1005 DRMLISTDELINIT(&bo_gem->vma_list);
1007 if (bo_gem->mem_virtual) {
1008 munmap(bo_gem->mem_virtual, bo_gem->bo.size);
1009 bo_gem->mem_virtual = NULL;
1010 bufmgr_gem->vma_count--;
1012 if (bo_gem->gtt_virtual) {
1013 munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
1014 bo_gem->gtt_virtual = NULL;
1015 bufmgr_gem->vma_count--;
1020 static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1021 drm_intel_bo_gem *bo_gem)
1023 bufmgr_gem->vma_open--;
1024 DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
1025 if (bo_gem->mem_virtual)
1026 bufmgr_gem->vma_count++;
1027 if (bo_gem->gtt_virtual)
1028 bufmgr_gem->vma_count++;
1029 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
1032 static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1033 drm_intel_bo_gem *bo_gem)
1035 bufmgr_gem->vma_open++;
1036 DRMLISTDEL(&bo_gem->vma_list);
1037 if (bo_gem->mem_virtual)
1038 bufmgr_gem->vma_count--;
1039 if (bo_gem->gtt_virtual)
1040 bufmgr_gem->vma_count--;
1041 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
1045 drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
1047 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1048 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1049 struct drm_intel_gem_bo_bucket *bucket;
1052 /* Unreference all the target buffers */
1053 for (i = 0; i < bo_gem->reloc_count; i++) {
1054 if (bo_gem->reloc_target_info[i].bo != bo) {
1055 drm_intel_gem_bo_unreference_locked_timed(bo_gem->
1056 reloc_target_info[i].bo,
1060 bo_gem->reloc_count = 0;
1061 bo_gem->used_as_reloc_target = false;
1063 DBG("bo_unreference final: %d (%s)\n",
1064 bo_gem->gem_handle, bo_gem->name);
1066 /* release memory associated with this object */
1067 if (bo_gem->reloc_target_info) {
1068 free(bo_gem->reloc_target_info);
1069 bo_gem->reloc_target_info = NULL;
1071 if (bo_gem->relocs) {
1072 free(bo_gem->relocs);
1073 bo_gem->relocs = NULL;
1076 /* Clear any left-over mappings */
1077 if (bo_gem->map_count) {
1078 DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
1079 bo_gem->map_count = 0;
1080 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1081 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1084 DRMLISTDEL(&bo_gem->name_list);
1086 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
1087 /* Put the buffer into our internal cache for reuse if we can. */
1088 if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
1089 drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
1090 I915_MADV_DONTNEED)) {
1091 bo_gem->free_time = time;
1093 bo_gem->name = NULL;
1094 bo_gem->validate_index = -1;
1096 DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
1098 drm_intel_gem_bo_free(bo);
1102 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
1105 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1107 assert(atomic_read(&bo_gem->refcount) > 0);
1108 if (atomic_dec_and_test(&bo_gem->refcount))
1109 drm_intel_gem_bo_unreference_final(bo, time);
1112 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
1114 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1116 assert(atomic_read(&bo_gem->refcount) > 0);
1117 if (atomic_dec_and_test(&bo_gem->refcount)) {
1118 drm_intel_bufmgr_gem *bufmgr_gem =
1119 (drm_intel_bufmgr_gem *) bo->bufmgr;
1120 struct timespec time;
1122 clock_gettime(CLOCK_MONOTONIC, &time);
1124 pthread_mutex_lock(&bufmgr_gem->lock);
1125 drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
1126 drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
1127 pthread_mutex_unlock(&bufmgr_gem->lock);
1131 static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
1133 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1134 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1135 struct drm_i915_gem_set_domain set_domain;
1138 pthread_mutex_lock(&bufmgr_gem->lock);
1140 if (bo_gem->map_count++ == 0)
1141 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
1143 if (!bo_gem->mem_virtual) {
1144 struct drm_i915_gem_mmap mmap_arg;
1146 DBG("bo_map: %d (%s), map_count=%d\n",
1147 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1150 mmap_arg.handle = bo_gem->gem_handle;
1151 mmap_arg.offset = 0;
1152 mmap_arg.size = bo->size;
1153 ret = drmIoctl(bufmgr_gem->fd,
1154 DRM_IOCTL_I915_GEM_MMAP,
1158 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1159 __FILE__, __LINE__, bo_gem->gem_handle,
1160 bo_gem->name, strerror(errno));
1161 if (--bo_gem->map_count == 0)
1162 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1163 pthread_mutex_unlock(&bufmgr_gem->lock);
1166 VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
1167 bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
1169 DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1170 bo_gem->mem_virtual);
1171 bo->virtual = bo_gem->mem_virtual;
1173 VG_CLEAR(set_domain);
1174 set_domain.handle = bo_gem->gem_handle;
1175 set_domain.read_domains = I915_GEM_DOMAIN_CPU;
1177 set_domain.write_domain = I915_GEM_DOMAIN_CPU;
1179 set_domain.write_domain = 0;
1180 ret = drmIoctl(bufmgr_gem->fd,
1181 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1184 DBG("%s:%d: Error setting to CPU domain %d: %s\n",
1185 __FILE__, __LINE__, bo_gem->gem_handle,
1190 bo_gem->mapped_cpu_write = true;
1192 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1193 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
1194 pthread_mutex_unlock(&bufmgr_gem->lock);
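/*
 * Illustrative sketch (not part of the original code): the usual CPU
 * mapping pattern through the public wrappers. drm_intel_bo_map() lands in
 * drm_intel_gem_bo_map() above, and the matching unmap drops the vma
 * accounting again. data and size are placeholders for caller-owned
 * storage.
 *
 *	if (drm_intel_bo_map(bo, 1) == 0) {
 *		memcpy(bo->virtual, data, size);
 *		drm_intel_bo_unmap(bo);
 *	}
 */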
1200 map_gtt(drm_intel_bo *bo)
1202 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1203 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1206 if (bo_gem->map_count++ == 0)
1207 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
1209 /* Get a mapping of the buffer if we haven't before. */
1210 if (bo_gem->gtt_virtual == NULL) {
1211 struct drm_i915_gem_mmap_gtt mmap_arg;
1213 DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
1214 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1217 mmap_arg.handle = bo_gem->gem_handle;
1219 /* Get the fake offset back... */
1220 ret = drmIoctl(bufmgr_gem->fd,
1221 DRM_IOCTL_I915_GEM_MMAP_GTT,
1225 DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
1227 bo_gem->gem_handle, bo_gem->name,
1229 if (--bo_gem->map_count == 0)
1230 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1235 bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
1236 MAP_SHARED, bufmgr_gem->fd,
1238 if (bo_gem->gtt_virtual == MAP_FAILED) {
1239 bo_gem->gtt_virtual = NULL;
1241 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1243 bo_gem->gem_handle, bo_gem->name,
1245 if (--bo_gem->map_count == 0)
1246 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1251 bo->virtual = bo_gem->gtt_virtual;
1253 DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1254 bo_gem->gtt_virtual);
1259 int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
1261 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1262 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1263 struct drm_i915_gem_set_domain set_domain;
1266 pthread_mutex_lock(&bufmgr_gem->lock);
1270 pthread_mutex_unlock(&bufmgr_gem->lock);
1274 /* Now move it to the GTT domain so that the GPU and CPU
1275 * caches are flushed and the GPU isn't actively using the
1278 * The pagefault handler does this domain change for us when
1279 * it has unbound the BO from the GTT, but it's up to us to
1280 * tell it when we're about to use things if we had done
1281 * rendering and it still happens to be bound to the GTT.
1283 VG_CLEAR(set_domain);
1284 set_domain.handle = bo_gem->gem_handle;
1285 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1286 set_domain.write_domain = I915_GEM_DOMAIN_GTT;
1287 ret = drmIoctl(bufmgr_gem->fd,
1288 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1291 DBG("%s:%d: Error setting domain %d: %s\n",
1292 __FILE__, __LINE__, bo_gem->gem_handle,
1296 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1297 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
1298 pthread_mutex_unlock(&bufmgr_gem->lock);
1304 * Performs a mapping of the buffer object like the normal GTT
1305 * mapping, but avoids waiting for the GPU to be done reading from or
1306 * rendering to the buffer.
1308 * This is used in the implementation of GL_ARB_map_buffer_range: The
1309 * user asks to create a buffer, then does a mapping, fills some
1310 * space, runs a drawing command, then asks to map it again without
1311 * synchronizing because it guarantees that it won't write over the
1312 * data that the GPU is busy using (or, more specifically, that if it
1313 * does write over the data, it acknowledges that rendering is
1317 int drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
1319 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1322 /* If the CPU cache isn't coherent with the GTT, then use a
1323 * regular synchronized mapping. The problem is that we don't
1324 * track where the buffer was last used on the CPU side in
1325 * terms of drm_intel_bo_map vs drm_intel_gem_bo_map_gtt, so
1326 * we would potentially corrupt the buffer even when the user
1327 * does reasonable things.
1329 if (!bufmgr_gem->has_llc)
1330 return drm_intel_gem_bo_map_gtt(bo);
1332 pthread_mutex_lock(&bufmgr_gem->lock);
1334 pthread_mutex_unlock(&bufmgr_gem->lock);
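/*
 * Illustrative sketch (added, not original): the unsynchronized map is only
 * safe when the caller guarantees it will not touch bytes the GPU is still
 * reading, e.g. when appending to a streaming vertex buffer. vbo,
 * write_offset, verts and bytes are placeholders.
 *
 *	drm_intel_gem_bo_map_unsynchronized(vbo);
 *	memcpy((char *)vbo->virtual + write_offset, verts, bytes);
 *	drm_intel_gem_bo_unmap_gtt(vbo);
 */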
1339 static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
1341 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1342 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1348 pthread_mutex_lock(&bufmgr_gem->lock);
1350 if (bo_gem->map_count <= 0) {
1351 DBG("attempted to unmap an unmapped bo\n");
1352 pthread_mutex_unlock(&bufmgr_gem->lock);
1353 /* Preserve the old behaviour of just treating this as a
1354 * no-op rather than reporting the error.
1359 if (bo_gem->mapped_cpu_write) {
1360 struct drm_i915_gem_sw_finish sw_finish;
1362 /* Cause a flush to happen if the buffer's pinned for
1363 * scanout, so the results show up in a timely manner.
1364 * Unlike GTT set domains, this only does work if the
1365 * buffer should be scanout-related.
1367 VG_CLEAR(sw_finish);
1368 sw_finish.handle = bo_gem->gem_handle;
1369 ret = drmIoctl(bufmgr_gem->fd,
1370 DRM_IOCTL_I915_GEM_SW_FINISH,
1372 ret = ret == -1 ? -errno : 0;
1374 bo_gem->mapped_cpu_write = false;
1377	/* We need to unmap after every invocation as we cannot track
1378	 * an open vma for every bo as that will exhaust the system
1379 * limits and cause later failures.
1381 if (--bo_gem->map_count == 0) {
1382 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1383 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1386 pthread_mutex_unlock(&bufmgr_gem->lock);
1391 int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
1393 return drm_intel_gem_bo_unmap(bo);
1397 drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
1398 unsigned long size, const void *data)
1400 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1401 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1402 struct drm_i915_gem_pwrite pwrite;
1406 pwrite.handle = bo_gem->gem_handle;
1407 pwrite.offset = offset;
1409 pwrite.data_ptr = (uint64_t) (uintptr_t) data;
1410 ret = drmIoctl(bufmgr_gem->fd,
1411 DRM_IOCTL_I915_GEM_PWRITE,
1415 DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
1416 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1417 (int)size, strerror(errno));
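/*
 * Illustrative sketch (not part of the original source): uploading through
 * this pwrite path avoids creating a CPU mapping at all, which suits small
 * one-shot uploads such as constant buffers. consts is a placeholder for
 * caller-owned data.
 *
 *	drm_intel_bo_subdata(bo, 0, sizeof(consts), consts);
 */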
1424 drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
1426 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1427 struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
1430 VG_CLEAR(get_pipe_from_crtc_id);
1431 get_pipe_from_crtc_id.crtc_id = crtc_id;
1432 ret = drmIoctl(bufmgr_gem->fd,
1433 DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
1434 &get_pipe_from_crtc_id);
1436 /* We return -1 here to signal that we don't
1437 * know which pipe is associated with this crtc.
1438 * This lets the caller know that this information
1439 * isn't available; using the wrong pipe for
1440 * vblank waiting can cause the chipset to lock up
1445 return get_pipe_from_crtc_id.pipe;
1449 drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
1450 unsigned long size, void *data)
1452 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1453 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1454 struct drm_i915_gem_pread pread;
1458 pread.handle = bo_gem->gem_handle;
1459 pread.offset = offset;
1461 pread.data_ptr = (uint64_t) (uintptr_t) data;
1462 ret = drmIoctl(bufmgr_gem->fd,
1463 DRM_IOCTL_I915_GEM_PREAD,
1467 DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
1468 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1469 (int)size, strerror(errno));
1475 /** Waits for all GPU rendering with the object to have completed. */
1477 drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
1479 drm_intel_gem_bo_start_gtt_access(bo, 1);
1483 * Waits on a BO for the given amount of time.
1485 * @bo: buffer object to wait for
1486 * @timeout_ns: amount of time to wait in nanoseconds.
1487 * If value is less than 0, an infinite wait will occur.
1489 * Returns 0 if the wait was successful, i.e. the last batch referencing the
1490 * object has completed within the allotted time. Otherwise some negative return
1491 * value describes the error. Of particular interest is -ETIME when the wait has
1492 * failed to yield the desired result.
1494 * Similar to drm_intel_gem_bo_wait_rendering except a timeout parameter allows
1495 * the operation to give up after a certain amount of time. Another subtle
1496 * difference is the internal locking semantics are different (this variant does
1497 * not hold the lock for the duration of the wait). This makes the wait subject
1498 * to a larger userspace race window.
1500 * The implementation shall wait until the object is no longer actively
1501 * referenced within a batch buffer at the time of the call. The wait will
1502 * not guard against the buffer being re-issued via another thread, or a flinked
1503 * handle. Userspace must make sure this race does not occur if such precision
1506 int drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
1508 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1509 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1510 struct drm_i915_gem_wait wait;
1513 if (!bufmgr_gem->has_wait_timeout) {
1514 DBG("%s:%d: Timed wait is not supported. Falling back to "
1515 "infinite wait\n", __FILE__, __LINE__);
1517 drm_intel_gem_bo_wait_rendering(bo);
1520 return drm_intel_gem_bo_busy(bo) ? -ETIME : 0;
1524 wait.bo_handle = bo_gem->gem_handle;
1525 wait.timeout_ns = timeout_ns;
1527 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
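/*
 * Illustrative sketch (added, not original): poll-style use of the timed
 * wait described above.
 *
 *	int err = drm_intel_gem_bo_wait(bo, 1000000);
 *
 * A -ETIME return means the buffer was still busy when the 1ms budget ran
 * out; 0 means the last batch referencing it has completed.
 */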
1535 * Sets the object to the GTT read and possibly write domain, used by the X
1536 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
1538 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
1539 * can do tiled pixmaps this way.
1542 drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
1544 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1545 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1546 struct drm_i915_gem_set_domain set_domain;
1549 VG_CLEAR(set_domain);
1550 set_domain.handle = bo_gem->gem_handle;
1551 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1552 set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
1553 ret = drmIoctl(bufmgr_gem->fd,
1554 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1557 DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
1558 __FILE__, __LINE__, bo_gem->gem_handle,
1559 set_domain.read_domains, set_domain.write_domain,
1565 drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
1567 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1570 free(bufmgr_gem->exec2_objects);
1571 free(bufmgr_gem->exec_objects);
1572 free(bufmgr_gem->exec_bos);
1574 pthread_mutex_destroy(&bufmgr_gem->lock);
1576 /* Free any cached buffer objects we were going to reuse */
1577 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
1578 struct drm_intel_gem_bo_bucket *bucket =
1579 &bufmgr_gem->cache_bucket[i];
1580 drm_intel_bo_gem *bo_gem;
1582 while (!DRMLISTEMPTY(&bucket->head)) {
1583 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1584 bucket->head.next, head);
1585 DRMLISTDEL(&bo_gem->head);
1587 drm_intel_gem_bo_free(&bo_gem->bo);
1595 * Adds the target buffer to the validation list and adds the relocation
1596 * to the reloc_buffer's relocation list.
1598 * The relocation entry at the given offset must already contain the
1599 * precomputed relocation value, because the kernel will optimize out
1600 * the relocation entry write when the buffer hasn't moved from the
1601 * last known offset in target_bo.
1604 do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1605 drm_intel_bo *target_bo, uint32_t target_offset,
1606 uint32_t read_domains, uint32_t write_domain,
1609 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1610 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1611 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
1612 bool fenced_command;
1614 if (bo_gem->has_error)
1617 if (target_bo_gem->has_error) {
1618 bo_gem->has_error = true;
1622 /* We never use HW fences for rendering on 965+ */
1623 if (bufmgr_gem->gen >= 4)
1626 fenced_command = need_fence;
1627 if (target_bo_gem->tiling_mode == I915_TILING_NONE)
1630 /* Create a new relocation list if needed */
1631 if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
1634 /* Check overflow */
1635 assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
1638 assert(offset <= bo->size - 4);
1639 assert((write_domain & (write_domain - 1)) == 0);
1641 /* Make sure that we're not adding a reloc to something whose size has
1642 * already been accounted for.
1644 assert(!bo_gem->used_as_reloc_target);
1645 if (target_bo_gem != bo_gem) {
1646 target_bo_gem->used_as_reloc_target = true;
1647 bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
1649 /* An object needing a fence is a tiled buffer, so it won't have
1650 * relocs to other buffers.
1653 target_bo_gem->reloc_tree_fences = 1;
1654 bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
1656 bo_gem->relocs[bo_gem->reloc_count].offset = offset;
1657 bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
1658 bo_gem->relocs[bo_gem->reloc_count].target_handle =
1659 target_bo_gem->gem_handle;
1660 bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
1661 bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
1662 bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
1664 bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
1665 if (target_bo != bo)
1666 drm_intel_gem_bo_reference(target_bo);
1668 bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
1669 DRM_INTEL_RELOC_FENCE;
1671 bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
1673 bo_gem->reloc_count++;
1679 drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1680 drm_intel_bo *target_bo, uint32_t target_offset,
1681 uint32_t read_domains, uint32_t write_domain)
1683 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
1685 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1686 read_domains, write_domain,
1687 !bufmgr_gem->fenced_relocs);
1691 drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
1692 drm_intel_bo *target_bo,
1693 uint32_t target_offset,
1694 uint32_t read_domains, uint32_t write_domain)
1696 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1697 read_domains, write_domain, true);
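/*
 * Illustrative sketch (not in the original file): a batchbuffer writer
 * typically stores the presumed address in the command stream and then
 * records the relocation so the kernel can patch it if the target moves.
 * batch, batch_bo, target, n and delta are placeholders.
 *
 *	batch[n] = target->offset + delta;
 *	drm_intel_bo_emit_reloc(batch_bo, n * 4,
 *				target, delta,
 *				I915_GEM_DOMAIN_RENDER, 0);
 */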
1701 drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
1703 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1705 return bo_gem->reloc_count;
1709 * Removes existing relocation entries in the BO after "start".
1711 * This allows a user to avoid a two-step process for state setup with
1712 * counting up all the buffer objects and doing a
1713 * drm_intel_bufmgr_check_aperture_space() before emitting any of the
1714 * relocations for the state setup. Instead, save the state of the
1715 * batchbuffer including drm_intel_gem_get_reloc_count(), emit all the
1716 * state, and then check if it still fits in the aperture.
1718 * Any further drm_intel_bufmgr_check_aperture_space() queries
1719 * involving this buffer in the tree are undefined after this call.
1722 drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
1724 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1726 struct timespec time;
1728 clock_gettime(CLOCK_MONOTONIC, &time);
1730 assert(bo_gem->reloc_count >= start);
1731 /* Unreference the cleared target buffers */
1732 for (i = start; i < bo_gem->reloc_count; i++) {
1733 if (bo_gem->reloc_target_info[i].bo != bo) {
1734 drm_intel_gem_bo_unreference_locked_timed(bo_gem->
1735 reloc_target_info[i].bo,
1739 bo_gem->reloc_count = start;
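/*
 * Illustrative sketch (added, not original) of the pattern described above:
 * record the reloc count, emit speculative state, and roll back if the
 * batch would no longer fit in the aperture. batch_bo is a placeholder.
 *
 *	int saved = drm_intel_gem_bo_get_reloc_count(batch_bo);
 *	... emit state and relocations ...
 *	if (drm_intel_bufmgr_check_aperture_space(&batch_bo, 1) != 0)
 *		drm_intel_gem_bo_clear_relocs(batch_bo, saved);
 */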
1743 * Walk the tree of relocations rooted at BO and accumulate the list of
1744 * validations to be performed and update the relocation buffers with
1745 * index values into the validation list.
1748 drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
1750 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1753 if (bo_gem->relocs == NULL)
1756 for (i = 0; i < bo_gem->reloc_count; i++) {
1757 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
1759 if (target_bo == bo)
1762 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1764 /* Continue walking the tree depth-first. */
1765 drm_intel_gem_bo_process_reloc(target_bo);
1767 /* Add the target to the validate list */
1768 drm_intel_add_validate_buffer(target_bo);
1773 drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
1775 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1778 if (bo_gem->relocs == NULL)
1781 for (i = 0; i < bo_gem->reloc_count; i++) {
1782 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
1785 if (target_bo == bo)
1788 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1790 /* Continue walking the tree depth-first. */
1791 drm_intel_gem_bo_process_reloc2(target_bo);
1793 need_fence = (bo_gem->reloc_target_info[i].flags &
1794 DRM_INTEL_RELOC_FENCE);
1796 /* Add the target to the validate list */
1797 drm_intel_add_validate_buffer2(target_bo, need_fence);
1803 drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
1807 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1808 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1809 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1811 /* Update the buffer offset */
1812 if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
1813 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1814 bo_gem->gem_handle, bo_gem->name, bo->offset,
1815 (unsigned long long)bufmgr_gem->exec_objects[i].
1817 bo->offset = bufmgr_gem->exec_objects[i].offset;
1823 drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
1827 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1828 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1829 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1831 /* Update the buffer offset */
1832 if (bufmgr_gem->exec2_objects[i].offset != bo->offset) {
1833 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1834 bo_gem->gem_handle, bo_gem->name, bo->offset,
1835 (unsigned long long)bufmgr_gem->exec2_objects[i].offset);
1836 bo->offset = bufmgr_gem->exec2_objects[i].offset;
1842 aub_out(drm_intel_bufmgr_gem *bufmgr_gem, uint32_t data)
1844 fwrite(&data, 1, 4, bufmgr_gem->aub_file);
1848 aub_out_data(drm_intel_bufmgr_gem *bufmgr_gem, void *data, size_t size)
1850 fwrite(data, 1, size, bufmgr_gem->aub_file);
1854 aub_write_bo_data(drm_intel_bo *bo, uint32_t offset, uint32_t size)
1856 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1857 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1861 data = malloc(bo->size);
1862 drm_intel_bo_get_subdata(bo, offset, size, data);
1864 /* Easy mode: write out bo with no relocations */
1865 if (!bo_gem->reloc_count) {
1866 aub_out_data(bufmgr_gem, data, size);
1871 /* Otherwise, handle the relocations while writing. */
1872 for (i = 0; i < size / 4; i++) {
1874 for (r = 0; r < bo_gem->reloc_count; r++) {
1875 struct drm_i915_gem_relocation_entry *reloc;
1876 drm_intel_reloc_target *info;
1878 reloc = &bo_gem->relocs[r];
1879 info = &bo_gem->reloc_target_info[r];
1881 if (reloc->offset == offset + i * 4) {
1882 drm_intel_bo_gem *target_gem;
1885 target_gem = (drm_intel_bo_gem *)info->bo;
1888 val += target_gem->aub_offset;
1890 aub_out(bufmgr_gem, val);
1895 if (r == bo_gem->reloc_count) {
1896 /* no relocation, just the data */
1897 aub_out(bufmgr_gem, data[i]);
1905 aub_bo_get_address(drm_intel_bo *bo)
1907 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1908 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1910 /* Give the object a graphics address in the AUB file. We
1911 * don't just use the GEM object address because we do AUB
1912 * dumping before execution -- we want to successfully log
1913 * when the hardware might hang, and we might even want to aub
1914 * capture for a driver trying to execute on a different
1915 * generation of hardware by disabling the actual kernel exec
1918 bo_gem->aub_offset = bufmgr_gem->aub_offset;
1919 bufmgr_gem->aub_offset += bo->size;
1920 /* XXX: Handle aperture overflow. */
1921 assert(bufmgr_gem->aub_offset < 256 * 1024 * 1024);
1925 aub_write_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype,
1926 uint32_t offset, uint32_t size)
1928 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1929 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1932 CMD_AUB_TRACE_HEADER_BLOCK |
1935 AUB_TRACE_MEMTYPE_GTT | type | AUB_TRACE_OP_DATA_WRITE);
1936 aub_out(bufmgr_gem, subtype);
1937 aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
1938 aub_out(bufmgr_gem, size);
1939 aub_write_bo_data(bo, offset, size);
1943 * Break up large objects into multiple writes. Otherwise a 128kB VBO
1944 * would overflow the 16-bit size field in the packet header and
1945 * everything goes badly after that.
1948 aub_write_large_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype,
1949 uint32_t offset, uint32_t size)
1951 uint32_t block_size;
1952 uint32_t sub_offset;
1954 for (sub_offset = 0; sub_offset < size; sub_offset += block_size) {
1955 block_size = size - sub_offset;
1957 if (block_size > 8 * 4096)
1958 block_size = 8 * 4096;
1960 aub_write_trace_block(bo, type, subtype, offset + sub_offset,
1966 aub_write_bo(drm_intel_bo *bo)
1968 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1969 uint32_t offset = 0;
1972 aub_bo_get_address(bo);
1974 /* Write out each annotated section separately. */
1975 for (i = 0; i < bo_gem->aub_annotation_count; ++i) {
1976 drm_intel_aub_annotation *annotation =
1977 &bo_gem->aub_annotations[i];
1978 uint32_t ending_offset = annotation->ending_offset;
1979 if (ending_offset > bo->size)
1980 ending_offset = bo->size;
1981 if (ending_offset > offset) {
1982 aub_write_large_trace_block(bo, annotation->type,
1983 annotation->subtype,
1985 ending_offset - offset);
1986 offset = ending_offset;
1990 /* Write out any remaining unannotated data */
1991 if (offset < bo->size) {
1992 aub_write_large_trace_block(bo, AUB_TRACE_TYPE_NOTYPE, 0,
1993 offset, bo->size - offset);
1998 * Make a ring buffer on the fly and dump it
2001 aub_build_dump_ringbuffer(drm_intel_bufmgr_gem *bufmgr_gem,
2002 uint32_t batch_buffer, int ring_flag)
2004 uint32_t ringbuffer[4096];
2005 int ring = AUB_TRACE_TYPE_RING_PRB0; /* The default ring */
2008 if (ring_flag == I915_EXEC_BSD)
2009 ring = AUB_TRACE_TYPE_RING_PRB1;
2011 /* Make a ring buffer to execute our batchbuffer. */
2012 memset(ringbuffer, 0, sizeof(ringbuffer));
2013 ringbuffer[ring_count++] = AUB_MI_BATCH_BUFFER_START;
2014 ringbuffer[ring_count++] = batch_buffer;
2016 /* Write out the ring. This appears to trigger execution of
2017 * the ring in the simulator.
2020 CMD_AUB_TRACE_HEADER_BLOCK |
2023 AUB_TRACE_MEMTYPE_GTT | ring | AUB_TRACE_OP_COMMAND_WRITE);
2024 aub_out(bufmgr_gem, 0); /* general/surface subtype */
2025 aub_out(bufmgr_gem, bufmgr_gem->aub_offset);
2026 aub_out(bufmgr_gem, ring_count * 4);
2028 /* FIXME: Need some flush operations here? */
2029 aub_out_data(bufmgr_gem, ringbuffer, ring_count * 4);
2031 /* Update offset pointer */
2032 bufmgr_gem->aub_offset += 4096;
2036 drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
2037 int x1, int y1, int width, int height,
2038 enum aub_dump_bmp_format format,
2039 int pitch, int offset)
2041 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2042 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
2046 case AUB_DUMP_BMP_FORMAT_8BIT:
2049 case AUB_DUMP_BMP_FORMAT_ARGB_4444:
2052 case AUB_DUMP_BMP_FORMAT_ARGB_0888:
2053 case AUB_DUMP_BMP_FORMAT_ARGB_8888:
2057 printf("Unknown AUB dump format %d\n", format);
2061 if (!bufmgr_gem->aub_file)
2064 aub_out(bufmgr_gem, CMD_AUB_DUMP_BMP | 4);
2065 aub_out(bufmgr_gem, (y1 << 16) | x1);
2070 aub_out(bufmgr_gem, (height << 16) | width);
2071 aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
2073 ((bo_gem->tiling_mode != I915_TILING_NONE) ? (1 << 2) : 0) |
2074 ((bo_gem->tiling_mode == I915_TILING_Y) ? (1 << 3) : 0));
2078 aub_exec(drm_intel_bo *bo, int ring_flag, int used)
2080 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2081 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2083 bool batch_buffer_needs_annotations;
2085 if (!bufmgr_gem->aub_file)
2088	/* If the batch buffer is not annotated, annotate it the best we
2091 batch_buffer_needs_annotations = bo_gem->aub_annotation_count == 0;
2092 if (batch_buffer_needs_annotations) {
2093 drm_intel_aub_annotation annotations[2] = {
2094 { AUB_TRACE_TYPE_BATCH, 0, used },
2095 { AUB_TRACE_TYPE_NOTYPE, 0, bo->size }
2097 drm_intel_bufmgr_gem_set_aub_annotations(bo, annotations, 2);
2100 /* Write out all buffers to AUB memory */
2101 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2102 aub_write_bo(bufmgr_gem->exec_bos[i]);
2105 /* Remove any annotations we added */
2106 if (batch_buffer_needs_annotations)
2107 drm_intel_bufmgr_gem_set_aub_annotations(bo, NULL, 0);
2109 /* Dump ring buffer */
2110 aub_build_dump_ringbuffer(bufmgr_gem, bo_gem->aub_offset, ring_flag);
2112 fflush(bufmgr_gem->aub_file);
2115 * One frame has been dumped. So reset the aub_offset for the next frame.
2117 * FIXME: Can we do this?
2119 bufmgr_gem->aub_offset = 0x10000;
2123 drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
2124 drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
2126 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2127 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2128 struct drm_i915_gem_execbuffer execbuf;
2131 if (bo_gem->has_error)
2134 pthread_mutex_lock(&bufmgr_gem->lock);
2135 /* Update indices and set up the validate list. */
2136 drm_intel_gem_bo_process_reloc(bo);
2138 /* Add the batch buffer to the validation list. There are no
2139 * relocations pointing to it.
2141 drm_intel_add_validate_buffer(bo);
2144 execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
2145 execbuf.buffer_count = bufmgr_gem->exec_count;
2146 execbuf.batch_start_offset = 0;
2147 execbuf.batch_len = used;
2148 execbuf.cliprects_ptr = (uintptr_t) cliprects;
2149 execbuf.num_cliprects = num_cliprects;
2153 ret = drmIoctl(bufmgr_gem->fd,
2154 DRM_IOCTL_I915_GEM_EXECBUFFER,
2158 if (errno == ENOSPC) {
2159 DBG("Execbuffer fails to pin. "
2160 "Estimate: %u. Actual: %u. Available: %u\n",
2161 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
2164 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
2167 (unsigned int)bufmgr_gem->gtt_size);
2170 drm_intel_update_buffer_offsets(bufmgr_gem);
2172 if (bufmgr_gem->bufmgr.debug)
2173 drm_intel_gem_dump_validation_list(bufmgr_gem);
2175 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2176 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2177 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2179 /* Disconnect the buffer from the validate list */
2180 bo_gem->validate_index = -1;
2181 bufmgr_gem->exec_bos[i] = NULL;
2183 bufmgr_gem->exec_count = 0;
2184 pthread_mutex_unlock(&bufmgr_gem->lock);
2190 do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
2191 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
2194 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
2195 struct drm_i915_gem_execbuffer2 execbuf;
2199 switch (flags & 0x7) {
2203 if (!bufmgr_gem->has_blt)
2207 if (!bufmgr_gem->has_bsd)
2210 case I915_EXEC_RENDER:
2211 case I915_EXEC_DEFAULT:
2215 pthread_mutex_lock(&bufmgr_gem->lock);
2216 /* Update indices and set up the validate list. */
2217 drm_intel_gem_bo_process_reloc2(bo);
2219 /* Add the batch buffer to the validation list. There are no relocations
2222 drm_intel_add_validate_buffer2(bo, 0);
2225 execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
2226 execbuf.buffer_count = bufmgr_gem->exec_count;
2227 execbuf.batch_start_offset = 0;
2228 execbuf.batch_len = used;
2229 execbuf.cliprects_ptr = (uintptr_t)cliprects;
2230 execbuf.num_cliprects = num_cliprects;
2233 execbuf.flags = flags;
2235 i915_execbuffer2_set_context_id(execbuf, 0);
2237 i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id);
2240 aub_exec(bo, flags, used);
2242 if (bufmgr_gem->no_exec)
2243 goto skip_execution;
2245 ret = drmIoctl(bufmgr_gem->fd,
2246 DRM_IOCTL_I915_GEM_EXECBUFFER2,
2250 if (ret == -ENOSPC) {
2251 DBG("Execbuffer fails to pin. "
2252 "Estimate: %u. Actual: %u. Available: %u\n",
2253 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
2254 bufmgr_gem->exec_count),
2255 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
2256 bufmgr_gem->exec_count),
2257 (unsigned int) bufmgr_gem->gtt_size);
2260 drm_intel_update_buffer_offsets2(bufmgr_gem);
2263 if (bufmgr_gem->bufmgr.debug)
2264 drm_intel_gem_dump_validation_list(bufmgr_gem);
2266 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2267 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2268 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
2270 /* Disconnect the buffer from the validate list */
2271 bo_gem->validate_index = -1;
2272 bufmgr_gem->exec_bos[i] = NULL;
2274 bufmgr_gem->exec_count = 0;
2275 pthread_mutex_unlock(&bufmgr_gem->lock);
2281 drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
2282 drm_clip_rect_t *cliprects, int num_cliprects,
2285 return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
2290 drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
2291 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
2294 return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
2299 drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
2300 int used, unsigned int flags)
2302 return do_exec2(bo, used, ctx, NULL, 0, 0, flags);
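/*
 * Illustrative sketch (not part of the original file): how a caller might
 * select an engine through the exec2 flags handled above.  Variable names
 * are hypothetical and error handling is omitted.
 *
 *	unsigned int flags = use_blt ? I915_EXEC_BLT : I915_EXEC_RENDER;
 *
 *	// Multi-ring submission via the public wrapper:
 *	drm_intel_bo_mrb_exec(batch_bo, batch_used, NULL, 0, 0, flags);
 *
 *	// Or tied to a hardware context (render ring):
 *	drm_intel_gem_bo_context_exec(batch_bo, ctx, batch_used,
 *				      I915_EXEC_RENDER);
 */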
2306 drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
2308 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2309 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2310 struct drm_i915_gem_pin pin;
2314 pin.handle = bo_gem->gem_handle;
2315 pin.alignment = alignment;
2317 ret = drmIoctl(bufmgr_gem->fd,
2318 DRM_IOCTL_I915_GEM_PIN,
2323 bo->offset = pin.offset;
2328 drm_intel_gem_bo_unpin(drm_intel_bo *bo)
2330 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2331 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2332 struct drm_i915_gem_unpin unpin;
2336 unpin.handle = bo_gem->gem_handle;
2338 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
2346 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
2347 uint32_t tiling_mode,
2350 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2351 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2352 struct drm_i915_gem_set_tiling set_tiling;
2355 if (bo_gem->global_name == 0 &&
2356 tiling_mode == bo_gem->tiling_mode &&
2357 stride == bo_gem->stride)
2360 memset(&set_tiling, 0, sizeof(set_tiling));
2362 /* set_tiling is slightly broken and overwrites the
2363 * input on the error path, so we have to open-code drmIoctl. */
2366 set_tiling.handle = bo_gem->gem_handle;
2367 set_tiling.tiling_mode = tiling_mode;
2368 set_tiling.stride = stride;
2370 ret = ioctl(bufmgr_gem->fd,
2371 DRM_IOCTL_I915_GEM_SET_TILING,
2373 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
2377 bo_gem->tiling_mode = set_tiling.tiling_mode;
2378 bo_gem->swizzle_mode = set_tiling.swizzle_mode;
2379 bo_gem->stride = set_tiling.stride;
2384 drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
2387 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2388 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2391 /* Linear buffers have no stride. By ensuring that we only ever use
2392 * stride 0 with linear buffers, we simplify our code.
2394 if (*tiling_mode == I915_TILING_NONE)
2397 ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
2399 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
2401 *tiling_mode = bo_gem->tiling_mode;
2406 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
2407 uint32_t * swizzle_mode)
2409 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2411 *tiling_mode = bo_gem->tiling_mode;
2412 *swizzle_mode = bo_gem->swizzle_mode;
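/*
 * Rough usage sketch for the public tiling API (names hypothetical, error
 * checks omitted).  Note that *tiling_mode is in/out: the caller should
 * re-read it to learn what the kernel actually granted.
 *
 *	uint32_t tiling = I915_TILING_X;
 *	uint32_t stride = 4096;		// bytes per row of the tiled layout
 *	drm_intel_bo_set_tiling(bo, &tiling, stride);
 *	if (tiling == I915_TILING_NONE)
 *		;			// kernel refused; fall back to linear
 *
 *	uint32_t swizzle;
 *	drm_intel_bo_get_tiling(bo, &tiling, &swizzle);
 */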
2417 drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
2419 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
2422 drm_intel_bo_gem *bo_gem;
2423 struct drm_i915_gem_get_tiling get_tiling;
2425 ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
2427 fprintf(stderr, "drmPrimeFDToHandle failed: ret %d, errno %d\n", ret, errno);
2431 bo_gem = calloc(1, sizeof(*bo_gem));
2435 bo_gem->bo.size = size;
2436 bo_gem->bo.handle = handle;
2437 bo_gem->bo.bufmgr = bufmgr;
2439 bo_gem->gem_handle = handle;
2441 atomic_set(&bo_gem->refcount, 1);
2443 bo_gem->name = "prime";
2444 bo_gem->validate_index = -1;
2445 bo_gem->reloc_tree_fences = 0;
2446 bo_gem->used_as_reloc_target = false;
2447 bo_gem->has_error = false;
2448 bo_gem->reusable = false;
2450 DRMINITLISTHEAD(&bo_gem->name_list);
2451 DRMINITLISTHEAD(&bo_gem->vma_list);
2453 VG_CLEAR(get_tiling);
2454 get_tiling.handle = bo_gem->gem_handle;
2455 ret = drmIoctl(bufmgr_gem->fd,
2456 DRM_IOCTL_I915_GEM_GET_TILING,
2459 drm_intel_gem_bo_unreference(&bo_gem->bo);
2462 bo_gem->tiling_mode = get_tiling.tiling_mode;
2463 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
2464 /* XXX stride is unknown */
2465 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
2471 drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
2473 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2474 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2476 return drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle, DRM_CLOEXEC, prime_fd);
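/*
 * Minimal PRIME sharing sketch using the two helpers above (illustrative
 * only; passing the fd between processes is elided and error handling is
 * omitted).
 *
 *	// Exporting process:
 *	int fd;
 *	drm_intel_bo_gem_export_to_prime(bo, &fd);
 *	// ... send fd over a unix socket to the importer ...
 *
 *	// Importing process:
 *	drm_intel_bo *imported =
 *		drm_intel_bo_gem_create_from_prime(bufmgr, fd, size);
 *	// The import path queries tiling but not stride, so the stride must
 *	// be communicated out of band (e.g. alongside the fd).
 */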
2480 drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
2482 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2483 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2486 if (!bo_gem->global_name) {
2487 struct drm_gem_flink flink;
2490 flink.handle = bo_gem->gem_handle;
2492 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
2496 bo_gem->global_name = flink.name;
2497 bo_gem->reusable = false;
2499 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
2502 *name = bo_gem->global_name;
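/*
 * Sketch of global-name (flink) sharing, the pre-PRIME mechanism
 * (illustrative; variable names hypothetical, error handling omitted).
 * Flinking marks the bo as non-reusable, since another process may still
 * hold a reference when we would otherwise recycle it.
 *
 *	uint32_t name;
 *	drm_intel_bo_flink(bo, &name);
 *	// ... hand "name" to the other process ...
 *	drm_intel_bo *shared =
 *		drm_intel_bo_gem_create_from_name(bufmgr, "shared", name);
 */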
2507 * Enables unlimited caching of buffer objects for reuse.
2509 * This is potentially very memory expensive, as the cache at each bucket
2510 * size is only bounded by how many buffers of that size we've managed to have
2511 * in flight at once.
2514 drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
2516 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
2518 bufmgr_gem->bo_reuse = true;
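/*
 * Illustrative sketch: reuse is opt-in and is normally switched on once,
 * right after the bufmgr is created; individual objects that get shared
 * with the kernel (e.g. scanout) are then excluded again.
 *
 *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 *	// ...
 *	drm_intel_bo_disable_reuse(scanout_bo);		// see below
 */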
2522 * Enable use of fenced reloc type.
2524 * New code should enable this to avoid unnecessary fence register
2525 * allocation. If this option is not enabled, all relocs will have a fence
2526 * register allocated for them.
2529 drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
2531 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2533 if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
2534 bufmgr_gem->fenced_relocs = true;
2538 * Return the additional aperture space required by the tree of buffer objects rooted at bo.
2542 drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
2544 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2548 if (bo == NULL || bo_gem->included_in_check_aperture)
2552 bo_gem->included_in_check_aperture = true;
2554 for (i = 0; i < bo_gem->reloc_count; i++)
2556 drm_intel_gem_bo_get_aperture_space(bo_gem->
2557 reloc_target_info[i].bo);
2563 * Count the number of buffers in this list that need a fence reg
2565 * If the count is greater than the number of available regs, we'll have
2566 * to ask the caller to resubmit a batch with fewer tiled buffers.
2568 * This function over-counts if the same buffer is used multiple times.
2571 drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
2574 unsigned int total = 0;
2576 for (i = 0; i < count; i++) {
2577 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
2582 total += bo_gem->reloc_tree_fences;
2588 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
2589 * for the next drm_intel_bufmgr_check_aperture_space() call.
2592 drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
2594 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2597 if (bo == NULL || !bo_gem->included_in_check_aperture)
2600 bo_gem->included_in_check_aperture = false;
2602 for (i = 0; i < bo_gem->reloc_count; i++)
2603 drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
2604 reloc_target_info[i].bo);
2608 * Return a conservative estimate for the amount of aperture required
2609 * for a collection of buffers. This may double-count some buffers.
2612 drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
2615 unsigned int total = 0;
2617 for (i = 0; i < count; i++) {
2618 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
2620 total += bo_gem->reloc_tree_size;
2626 * Return the amount of aperture needed for a collection of buffers.
2627 * This avoids double counting any buffers, at the cost of looking
2628 * at every buffer in the set.
2631 drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
2634 unsigned int total = 0;
2636 for (i = 0; i < count; i++) {
2637 total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
2638 /* For the first buffer object in the array, we get an
2639 * accurate count back for its reloc_tree size (since nothing
2640 * had been flagged as being counted yet). We can save that
2641 * value out as a more conservative reloc_tree_size that
2642 * avoids double-counting target buffers. Since the first
2643 * buffer happens to usually be the batch buffer in our
2644 * callers, this can pull us back from doing the tree
2645 * walk on every new batch emit.
2648 drm_intel_bo_gem *bo_gem =
2649 (drm_intel_bo_gem *) bo_array[i];
2650 bo_gem->reloc_tree_size = total;
2654 for (i = 0; i < count; i++)
2655 drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
2660 * Return -1 if the batchbuffer should be flushed before attempting to
2661 * emit rendering referencing the buffers pointed to by bo_array.
2663 * This is required because if we try to emit a batchbuffer with relocations
2664 * to a tree of buffers that won't simultaneously fit in the aperture,
2665 * the rendering will return an error at a point where the software is not
2666 * prepared to recover from it.
2668 * However, we also want to emit the batchbuffer significantly before we reach
2669 * the limit, as a series of batchbuffers each of which references buffers
2670 * covering almost all of the aperture means that at each emit we end up
2671 * waiting to evict a buffer from the last rendering, and we get synchronous
2672 * performance. By emitting smaller batchbuffers, we eat some CPU overhead to
2673 * get better parallelism.
2676 drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
2678 drm_intel_bufmgr_gem *bufmgr_gem =
2679 (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
2680 unsigned int total = 0;
2681 unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
2684 /* Check for fence reg constraints if necessary */
2685 if (bufmgr_gem->available_fences) {
2686 total_fences = drm_intel_gem_total_fences(bo_array, count);
2687 if (total_fences > bufmgr_gem->available_fences)
2691 total = drm_intel_gem_estimate_batch_space(bo_array, count);
2693 if (total > threshold)
2694 total = drm_intel_gem_compute_batch_space(bo_array, count);
2696 if (total > threshold) {
2697 DBG("check_space: overflowed available aperture, "
2699 total / 1024, (int)bufmgr_gem->gtt_size / 1024);
2702 DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024,
2703 (int)bufmgr_gem->gtt_size / 1024);
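/*
 * Sketch of the intended caller pattern (illustrative; names such as
 * "flush_batch" are hypothetical): before emitting state that references a
 * new set of buffers, ask whether the whole relocation tree still fits, and
 * flush the batch first if it does not.
 *
 *	drm_intel_bo *check[] = { batch_bo, new_target_bo };
 *	if (drm_intel_bufmgr_check_aperture_space(check, 2) != 0)
 *		flush_batch();	// submit what we have, then retry
 */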
2709 * Disable buffer reuse for objects which are shared with the kernel
2710 * as scanout buffers
2713 drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
2715 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2717 bo_gem->reusable = false;
2722 drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
2724 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2726 return bo_gem->reusable;
2730 _drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
2732 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2735 for (i = 0; i < bo_gem->reloc_count; i++) {
2736 if (bo_gem->reloc_target_info[i].bo == target_bo)
2738 if (bo == bo_gem->reloc_target_info[i].bo)
2740 if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
2748 /** Return true if target_bo is referenced by bo's relocation tree. */
2750 drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
2752 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
2754 if (bo == NULL || target_bo == NULL)
2756 if (target_bo_gem->used_as_reloc_target)
2757 return _drm_intel_gem_bo_references(bo, target_bo);
2762 add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
2764 unsigned int i = bufmgr_gem->num_buckets;
2766 assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));
2768 DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
2769 bufmgr_gem->cache_bucket[i].size = size;
2770 bufmgr_gem->num_buckets++;
2774 init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
2776 unsigned long size, cache_max_size = 64 * 1024 * 1024;
2778 /* OK, so power of two buckets was too wasteful of memory.
2779 * Give 3 other sizes between each power of two, to hopefully
2780 * cover things accurately enough. (The alternative is
2781 * probably to just go for exact matching of sizes, and assume
2782 * that for things like composited window resize the tiled
2783 * width/height alignment and rounding of sizes to pages will
2784 * get us useful cache hit rates anyway)
2786 add_bucket(bufmgr_gem, 4096);
2787 add_bucket(bufmgr_gem, 4096 * 2);
2788 add_bucket(bufmgr_gem, 4096 * 3);
2790 /* Initialize the linked lists for BO reuse cache. */
2791 for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
2792 add_bucket(bufmgr_gem, size);
2794 add_bucket(bufmgr_gem, size + size * 1 / 4);
2795 add_bucket(bufmgr_gem, size + size * 2 / 4);
2796 add_bucket(bufmgr_gem, size + size * 3 / 4);
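/*
 * For reference, the scheme above works out to bucket sizes of roughly
 * 4, 8, 12, 16, 20, 24, 28, 32, 40, 48, 56, 64, 80, ... KiB and so on up
 * to 64 MiB: each power of two plus three intermediate steps, 55 buckets
 * in total, which fits the 14 * 4 entry cache_bucket array.
 */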
2801 drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
2803 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2805 bufmgr_gem->vma_max = limit;
2807 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
2811 * Get the PCI ID for the device. This can be overridden by setting the
2812 * INTEL_DEVID_OVERRIDE environment variable to the desired ID.
2815 get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
2817 char *devid_override;
2820 drm_i915_getparam_t gp;
2822 if (geteuid() == getuid()) {
2823 devid_override = getenv("INTEL_DEVID_OVERRIDE");
2824 if (devid_override) {
2825 bufmgr_gem->no_exec = true;
2826 return strtod(devid_override, NULL);
2832 gp.param = I915_PARAM_CHIPSET_ID;
2834 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2836 fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
2837 fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
2843 drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
2845 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2847 return bufmgr_gem->pci_device;
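/*
 * Illustrative note: with the override in place the library behaves as if
 * it were running on the given device but skips actual submission
 * (no_exec), which pairs naturally with AUB dumping below.  For example
 * (shell environment, hypothetical device id):
 *
 *	INTEL_DEVID_OVERRIDE=0x0166 ./my_test
 *
 *	// and from code:
 *	int devid = drm_intel_bufmgr_gem_get_devid(bufmgr);
 */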
2851 * Sets up AUB dumping.
2853 * This is a trace file format that can be used with the simulator.
2854 * Packets are emitted in a format somewhat like GPU command packets.
2855 * You can set up a GTT and upload your objects into the referenced
2856 * space, then send off batchbuffers and get BMPs out the other end.
2859 drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
2861 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2862 int entry = 0x200003;
2864 int gtt_size = 0x10000;
2867 if (bufmgr_gem->aub_file) {
2868 fclose(bufmgr_gem->aub_file);
2869 bufmgr_gem->aub_file = NULL;
2873 if (geteuid() != getuid())
2876 bufmgr_gem->aub_file = fopen("intel.aub", "w+");
2877 if (!bufmgr_gem->aub_file)
2880 /* Start allocating objects from just after the GTT. */
2881 bufmgr_gem->aub_offset = gtt_size;
2883 /* Start with a (required) version packet. */
2884 aub_out(bufmgr_gem, CMD_AUB_HEADER | (13 - 2));
2886 (4 << AUB_HEADER_MAJOR_SHIFT) |
2887 (0 << AUB_HEADER_MINOR_SHIFT));
2888 for (i = 0; i < 8; i++) {
2889 aub_out(bufmgr_gem, 0); /* app name */
2891 aub_out(bufmgr_gem, 0); /* timestamp */
2892 aub_out(bufmgr_gem, 0); /* timestamp */
2893 aub_out(bufmgr_gem, 0); /* comment len */
2895 /* Set up the GTT. The max we can handle is 256M */
2896 aub_out(bufmgr_gem, CMD_AUB_TRACE_HEADER_BLOCK | (5 - 2));
2897 aub_out(bufmgr_gem, AUB_TRACE_MEMTYPE_NONLOCAL | 0 | AUB_TRACE_OP_DATA_WRITE);
2898 aub_out(bufmgr_gem, 0); /* subtype */
2899 aub_out(bufmgr_gem, 0); /* offset */
2900 aub_out(bufmgr_gem, gtt_size); /* size */
2901 for (i = 0x000; i < gtt_size; i += 4, entry += 0x1000) {
2902 aub_out(bufmgr_gem, entry);
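/*
 * Rough usage sketch (illustrative, error handling omitted): enable
 * dumping right after bufmgr creation so every subsequent allocation and
 * execbuffer lands in "intel.aub", which a simulator or AUB decoder can
 * then replay.
 *
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
 *	drm_intel_bufmgr_gem_set_aub_dump(bufmgr, 1);
 *	// ... normal rendering; batches are written to the file ...
 *	drm_intel_bufmgr_gem_set_aub_dump(bufmgr, 0);	// closes intel.aub
 */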
2907 drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
2909 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2910 struct drm_i915_gem_context_create create;
2911 drm_intel_context *context = NULL;
2915 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
2917 DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
2922 context = calloc(1, sizeof(*context));
2923 context->ctx_id = create.ctx_id;
2924 context->bufmgr = bufmgr;
2930 drm_intel_gem_context_destroy(drm_intel_context *ctx)
2932 drm_intel_bufmgr_gem *bufmgr_gem;
2933 struct drm_i915_gem_context_destroy destroy;
2939 bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
2940 destroy.ctx_id = ctx->ctx_id;
2941 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY,
2944 fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
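/*
 * Hardware-context lifecycle sketch (illustrative; requires a kernel with
 * I915_GEM_CONTEXT_CREATE support, error handling omitted):
 *
 *	drm_intel_context *ctx = drm_intel_gem_context_create(bufmgr);
 *	if (ctx) {
 *		drm_intel_gem_bo_context_exec(batch_bo, ctx, batch_used,
 *					      I915_EXEC_RENDER);
 *		drm_intel_gem_context_destroy(ctx);
 *	}
 */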
2951 drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
2955 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2956 struct drm_i915_reg_read reg_read;
2960 reg_read.offset = offset;
2962 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_REG_READ, ®_read);
2964 *result = reg_read.val;
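/*
 * Illustrative sketch: the kernel whitelists which registers may be read
 * this way; the render engine TIMESTAMP counter (offset 0x2358 on these
 * generations, an assumption here) is the classic example.
 *
 *	uint64_t ts;
 *	if (drm_intel_reg_read(bufmgr, 0x2358, &ts) == 0)
 *		;	// ts now holds the raw register value
 */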
2970 * Annotate the given bo for use in aub dumping.
2972 * \param annotations is an array of drm_intel_aub_annotation objects
2973 * describing the type of data in various sections of the bo. Each
2974 * element of the array specifies the type and subtype of a section of
2975 * the bo, and the past-the-end offset of that section. The elements
2976 * of \c annotations must be sorted so that ending_offset is increasing.
2979 * \param count is the number of elements in the \c annotations array.
2980 * If \c count is zero, then \c annotations will not be dereferenced.
2982 * Annotations are copied into a private data structure, so the caller may
2983 * re-use the memory pointed to by \c annotations after the call returns.
2986 * Annotations are stored for the lifetime of the bo; to reset to the
2987 * default state (no annotations), call this function with a \c count of zero.
2991 drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
2992 drm_intel_aub_annotation *annotations,
2995 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2996 unsigned size = sizeof(*annotations) * count;
2997 drm_intel_aub_annotation *new_annotations =
2998 count > 0 ? realloc(bo_gem->aub_annotations, size) : NULL;
2999 if (new_annotations == NULL) {
3000 free(bo_gem->aub_annotations);
3001 bo_gem->aub_annotations = NULL;
3002 bo_gem->aub_annotation_count = 0;
3005 memcpy(new_annotations, annotations, size);
3006 bo_gem->aub_annotations = new_annotations;
3007 bo_gem->aub_annotation_count = count;
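/*
 * Usage sketch (illustrative; the type values come from intel_aub.h and
 * the offsets are hypothetical): mark the first 4096 bytes of a batch bo
 * as batch commands and the rest as general data, then clear the
 * annotations again when done.
 *
 *	drm_intel_aub_annotation notes[] = {
 *		{ AUB_TRACE_TYPE_BATCH,   0, 4096 },
 *		{ AUB_TRACE_TYPE_GENERAL, 0, bo->size },
 *	};
 *	drm_intel_bufmgr_gem_set_aub_annotations(bo, notes, 2);
 *	// ...
 *	drm_intel_bufmgr_gem_set_aub_annotations(bo, NULL, 0);
 */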
3011 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
3012 * and manage buffer objects.
3014 * \param fd File descriptor of the opened DRM device.
3017 drm_intel_bufmgr_gem_init(int fd, int batch_size)
3019 drm_intel_bufmgr_gem *bufmgr_gem;
3020 struct drm_i915_gem_get_aperture aperture;
3021 drm_i915_getparam_t gp;
3025 bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
3026 if (bufmgr_gem == NULL)
3029 bufmgr_gem->fd = fd;
3031 if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
3036 ret = drmIoctl(bufmgr_gem->fd,
3037 DRM_IOCTL_I915_GEM_GET_APERTURE,
3041 bufmgr_gem->gtt_size = aperture.aper_available_size;
3043 fprintf(stderr, "DRM_IOCTL_I915_GEM_APERTURE failed: %s\n",
3045 bufmgr_gem->gtt_size = 128 * 1024 * 1024;
3046 fprintf(stderr, "Assuming %dkB available aperture size.\n"
3047 "May lead to reduced performance or incorrect "
3049 (int)bufmgr_gem->gtt_size / 1024);
3052 bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);
3054 if (IS_GEN2(bufmgr_gem->pci_device))
3055 bufmgr_gem->gen = 2;
3056 else if (IS_GEN3(bufmgr_gem->pci_device))
3057 bufmgr_gem->gen = 3;
3058 else if (IS_GEN4(bufmgr_gem->pci_device))
3059 bufmgr_gem->gen = 4;
3060 else if (IS_GEN5(bufmgr_gem->pci_device))
3061 bufmgr_gem->gen = 5;
3062 else if (IS_GEN6(bufmgr_gem->pci_device))
3063 bufmgr_gem->gen = 6;
3064 else if (IS_GEN7(bufmgr_gem->pci_device))
3065 bufmgr_gem->gen = 7;
3071 if (IS_GEN3(bufmgr_gem->pci_device) &&
3072 bufmgr_gem->gtt_size > 256*1024*1024) {
3073 /* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
3074 * be used for tiled blits. To simplify the accounting, just
3075 * subtract the unmappable part (fixed to 256MB on all known
3076 * gen3 devices) if the kernel advertises it. */
3077 bufmgr_gem->gtt_size -= 256*1024*1024;
3083 gp.param = I915_PARAM_HAS_EXECBUF2;
3084 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3088 gp.param = I915_PARAM_HAS_BSD;
3089 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3090 bufmgr_gem->has_bsd = ret == 0;
3092 gp.param = I915_PARAM_HAS_BLT;
3093 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3094 bufmgr_gem->has_blt = ret == 0;
3096 gp.param = I915_PARAM_HAS_RELAXED_FENCING;
3097 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3098 bufmgr_gem->has_relaxed_fencing = ret == 0;
3100 gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
3101 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3102 bufmgr_gem->has_wait_timeout = ret == 0;
3104 gp.param = I915_PARAM_HAS_LLC;
3105 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3107 /* Kernel does not support the HAS_LLC query; fall back to GPU
3108 * generation detection and assume that we have LLC on GEN6/7. */
3110 bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) |
3111 IS_GEN7(bufmgr_gem->pci_device));
3113 bufmgr_gem->has_llc = ret == 0;
3115 if (bufmgr_gem->gen < 4) {
3116 gp.param = I915_PARAM_NUM_FENCES_AVAIL;
3117 gp.value = &bufmgr_gem->available_fences;
3118 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3120 fprintf(stderr, "get fences failed: %d [%d]\n", ret,
3122 fprintf(stderr, "param: %d, val: %d\n", gp.param,
3124 bufmgr_gem->available_fences = 0;
3126 /* XXX The kernel reports the total number of fences,
3127 * including any that may be pinned.
3129 * We presume that there will be at least one pinned
3130 * fence for the scanout buffer, but there may be more
3131 * than one scanout and the user may be manually
3132 * pinning buffers. Let's move to execbuffer2 and
3133 * thereby forget the insanity of using fences...
3135 bufmgr_gem->available_fences -= 2;
3136 if (bufmgr_gem->available_fences < 0)
3137 bufmgr_gem->available_fences = 0;
3141 /* Let's go with one relocation for every 2 dwords (but round down a bit
3142 * since a power of two will mean an extra page allocation for the reloc buffer).
3145 * Every 4 was too few for the blender benchmark.
3147 bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
3149 bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
3150 bufmgr_gem->bufmgr.bo_alloc_for_render =
3151 drm_intel_gem_bo_alloc_for_render;
3152 bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
3153 bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
3154 bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
3155 bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
3156 bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
3157 bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
3158 bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
3159 bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
3160 bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
3161 bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
3162 bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
3163 bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
3164 bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
3165 bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
3166 bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
3167 /* Use the new one if available */
3169 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
3170 bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
3172 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
3173 bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
3174 bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
3175 bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
3176 bufmgr_gem->bufmgr.debug = 0;
3177 bufmgr_gem->bufmgr.check_aperture_space =
3178 drm_intel_gem_check_aperture_space;
3179 bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
3180 bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
3181 bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
3182 drm_intel_gem_get_pipe_from_crtc_id;
3183 bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
3185 DRMINITLISTHEAD(&bufmgr_gem->named);
3186 init_cache_buckets(bufmgr_gem);
3188 DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
3189 bufmgr_gem->vma_max = -1; /* unlimited by default */
3191 return &bufmgr_gem->bufmgr;