1 /**************************************************************************
3 * Copyright © 2007 Red Hat Inc.
4 * Copyright © 2007-2012 Intel Corporation
5 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions
29 **************************************************************************/
31 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
32 * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
33 * Eric Anholt <eric@anholt.net>
34 * Dave Airlie <airlied@linux.ie>
42 #include <xf86atomic.h>
50 #include <sys/ioctl.h>
53 #include <sys/types.h>
58 #define ETIME ETIMEDOUT
60 #include "libdrm_lists.h"
61 #include "intel_bufmgr.h"
62 #include "intel_bufmgr_priv.h"
63 #include "intel_chipset.h"
64 #include "intel_aub.h"
77 #define VG_CLEAR(s) VG(memset(&s, 0, sizeof(s)))
79 #define DBG(...) do { \
80 if (bufmgr_gem->bufmgr.debug) \
81 fprintf(stderr, __VA_ARGS__); \
84 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
86 typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
88 struct drm_intel_gem_bo_bucket {
93 typedef struct _drm_intel_bufmgr_gem {
94 drm_intel_bufmgr bufmgr;
100 pthread_mutex_t lock;
102 struct drm_i915_gem_exec_object *exec_objects;
103 struct drm_i915_gem_exec_object2 *exec2_objects;
104 drm_intel_bo **exec_bos;
108 /** Array of lists of cached gem objects of power-of-two sizes */
109 struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
114 drmMMListHead vma_cache;
115 int vma_count, vma_open, vma_max;
118 int available_fences;
121 unsigned int has_bsd : 1;
122 unsigned int has_blt : 1;
123 unsigned int has_relaxed_fencing : 1;
124 unsigned int has_llc : 1;
125 unsigned int has_wait_timeout : 1;
126 unsigned int bo_reuse : 1;
127 unsigned int no_exec : 1;
128 unsigned int has_vebox : 1;
133 } drm_intel_bufmgr_gem;
135 #define DRM_INTEL_RELOC_FENCE (1<<0)
137 typedef struct _drm_intel_reloc_target_info {
140 } drm_intel_reloc_target;
142 struct _drm_intel_bo_gem {
150 * Kernel-assigned global name for this object
152 unsigned int global_name;
153 drmMMListHead name_list;
156 * Index of the buffer within the validation list while preparing a
157 * batchbuffer execution.
162 * Current tiling mode
164 uint32_t tiling_mode;
165 uint32_t swizzle_mode;
166 unsigned long stride;
170 /** Array passed to the DRM containing relocation information. */
171 struct drm_i915_gem_relocation_entry *relocs;
173 * Array of info structs corresponding to relocs[i].target_handle etc
175 drm_intel_reloc_target *reloc_target_info;
176 /** Number of entries in relocs */
178 /** Mapped address for the buffer, saved across map/unmap cycles */
180 /** GTT virtual address for the buffer, saved across map/unmap cycles */
183 drmMMListHead vma_list;
189 * Boolean of whether this BO and its children have been included in
190 * the current drm_intel_bufmgr_check_aperture_space() total.
192 bool included_in_check_aperture;
195 * Boolean of whether this buffer has been used as a relocation
196 * target and had its size accounted for, and thus can't have any
197 * further relocations added to it.
199 bool used_as_reloc_target;
202 * Boolean of whether we have encountered an error whilst building the relocation tree.
207 * Boolean of whether this buffer can be re-used
212 * Size in bytes of this buffer and its relocation descendants.
214 * Used to avoid costly tree walking in
215 * drm_intel_bufmgr_check_aperture in the common case.
220 * Number of potential fence registers required by this buffer and its
223 int reloc_tree_fences;
225 /** Whether we may need to do the SW_FINISH ioctl on unmap. */
226 bool mapped_cpu_write;
230 drm_intel_aub_annotation *aub_annotations;
231 unsigned aub_annotation_count;
235 drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);
238 drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);
241 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
242 uint32_t * swizzle_mode);
245 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
246 uint32_t tiling_mode,
249 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
252 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);
254 static void drm_intel_gem_bo_free(drm_intel_bo *bo);
257 drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
258 uint32_t *tiling_mode)
260 unsigned long min_size, max_size;
263 if (*tiling_mode == I915_TILING_NONE)
266 /* 965+ just need multiples of page size for tiling */
267 if (bufmgr_gem->gen >= 4)
268 return ROUND_UP_TO(size, 4096);
270 /* Older chips need powers of two, of at least 512k or 1M */
271 if (bufmgr_gem->gen == 3) {
272 min_size = 1024*1024;
273 max_size = 128*1024*1024;
276 max_size = 64*1024*1024;
279 if (size > max_size) {
280 *tiling_mode = I915_TILING_NONE;
284 /* Do we need to allocate every page for the fence? */
285 if (bufmgr_gem->has_relaxed_fencing)
286 return ROUND_UP_TO(size, 4096);
288 for (i = min_size; i < size; i <<= 1)
295 * Round a given pitch up to the minimum required for X tiling on a
296 * given chip. We use 512 as the minimum to allow for a later tiling
300 drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
301 unsigned long pitch, uint32_t *tiling_mode)
303 unsigned long tile_width;
306 /* If untiled, then just align it so that we can do rendering
307 * to it with the 3D engine.
309 if (*tiling_mode == I915_TILING_NONE)
310 return ALIGN(pitch, 64);
312 if (*tiling_mode == I915_TILING_X
313 || (IS_915(bufmgr_gem->pci_device)
314 && *tiling_mode == I915_TILING_Y))
319 /* 965 is flexible */
320 if (bufmgr_gem->gen >= 4)
321 return ROUND_UP_TO(pitch, tile_width);
323 /* The older hardware has a maximum pitch of 8192 with tiled
324 * surfaces, so fall back to untiled if it's too large.
327 *tiling_mode = I915_TILING_NONE;
328 return ALIGN(pitch, 64);
331 /* Pre-965 needs power of two tile width */
332 for (i = tile_width; i < pitch; i <<= 1)
338 static struct drm_intel_gem_bo_bucket *
339 drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
344 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
345 struct drm_intel_gem_bo_bucket *bucket =
346 &bufmgr_gem->cache_bucket[i];
347 if (bucket->size >= size) {
356 drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
360 for (i = 0; i < bufmgr_gem->exec_count; i++) {
361 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
362 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
364 if (bo_gem->relocs == NULL) {
365 DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
370 for (j = 0; j < bo_gem->reloc_count; j++) {
371 drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
372 drm_intel_bo_gem *target_gem =
373 (drm_intel_bo_gem *) target_bo;
375 DBG("%2d: %d (%s)@0x%08llx -> "
376 "%d (%s)@0x%08lx + 0x%08x\n",
378 bo_gem->gem_handle, bo_gem->name,
379 (unsigned long long)bo_gem->relocs[j].offset,
380 target_gem->gem_handle,
383 bo_gem->relocs[j].delta);
389 drm_intel_gem_bo_reference(drm_intel_bo *bo)
391 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
393 atomic_inc(&bo_gem->refcount);
397 * Adds the given buffer to the list of buffers to be validated (moved into the
398 * appropriate memory type) with the next batch submission.
400 * If a buffer is validated multiple times in a batch submission, it ends up
401 * with the intersection of the memory type flags and the union of the
405 drm_intel_add_validate_buffer(drm_intel_bo *bo)
407 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
408 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
411 if (bo_gem->validate_index != -1)
414 /* Extend the array of validation entries as necessary. */
415 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
416 int new_size = bufmgr_gem->exec_size * 2;
421 bufmgr_gem->exec_objects =
422 realloc(bufmgr_gem->exec_objects,
423 sizeof(*bufmgr_gem->exec_objects) * new_size);
424 bufmgr_gem->exec_bos =
425 realloc(bufmgr_gem->exec_bos,
426 sizeof(*bufmgr_gem->exec_bos) * new_size);
427 bufmgr_gem->exec_size = new_size;
430 index = bufmgr_gem->exec_count;
431 bo_gem->validate_index = index;
432 /* Fill in array entry */
433 bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
434 bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
435 bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
436 bufmgr_gem->exec_objects[index].alignment = 0;
437 bufmgr_gem->exec_objects[index].offset = 0;
438 bufmgr_gem->exec_bos[index] = bo;
439 bufmgr_gem->exec_count++;
443 drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
445 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
446 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
449 if (bo_gem->validate_index != -1) {
451 bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |=
452 EXEC_OBJECT_NEEDS_FENCE;
456 /* Extend the array of validation entries as necessary. */
457 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
458 int new_size = bufmgr_gem->exec_size * 2;
463 bufmgr_gem->exec2_objects =
464 realloc(bufmgr_gem->exec2_objects,
465 sizeof(*bufmgr_gem->exec2_objects) * new_size);
466 bufmgr_gem->exec_bos =
467 realloc(bufmgr_gem->exec_bos,
468 sizeof(*bufmgr_gem->exec_bos) * new_size);
469 bufmgr_gem->exec_size = new_size;
472 index = bufmgr_gem->exec_count;
473 bo_gem->validate_index = index;
474 /* Fill in array entry */
475 bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
476 bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
477 bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
478 bufmgr_gem->exec2_objects[index].alignment = 0;
479 bufmgr_gem->exec2_objects[index].offset = 0;
480 bufmgr_gem->exec_bos[index] = bo;
481 bufmgr_gem->exec2_objects[index].flags = 0;
482 bufmgr_gem->exec2_objects[index].rsvd1 = 0;
483 bufmgr_gem->exec2_objects[index].rsvd2 = 0;
485 bufmgr_gem->exec2_objects[index].flags |=
486 EXEC_OBJECT_NEEDS_FENCE;
488 bufmgr_gem->exec_count++;
491 #define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
495 drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
496 drm_intel_bo_gem *bo_gem)
500 assert(!bo_gem->used_as_reloc_target);
502 /* The older chipsets are far less flexible in terms of tiling,
503 * and require tiled buffers to be size-aligned in the aperture.
504 * This means that in the worst possible case we will need a hole
505 * twice as large as the object in order for it to fit into the
506 * aperture. Optimal packing is for wimps.
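 * For example (an illustrative worst case): a 1 MiB tiled buffer on a
 * pre-965 part without relaxed fencing is accounted below as a 2 MiB
 * aperture hole.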
508 size = bo_gem->bo.size;
509 if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
512 if (bufmgr_gem->has_relaxed_fencing) {
513 if (bufmgr_gem->gen == 3)
514 min_size = 1024*1024;
518 while (min_size < size)
523 /* Account for worst-case alignment. */
527 bo_gem->reloc_tree_size = size;
531 drm_intel_setup_reloc_list(drm_intel_bo *bo)
533 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
534 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
535 unsigned int max_relocs = bufmgr_gem->max_relocs;
537 if (bo->size / 4 < max_relocs)
538 max_relocs = bo->size / 4;
540 bo_gem->relocs = malloc(max_relocs *
541 sizeof(struct drm_i915_gem_relocation_entry));
542 bo_gem->reloc_target_info = malloc(max_relocs *
543 sizeof(drm_intel_reloc_target));
544 if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
545 bo_gem->has_error = true;
547 free (bo_gem->relocs);
548 bo_gem->relocs = NULL;
550 free (bo_gem->reloc_target_info);
551 bo_gem->reloc_target_info = NULL;
560 drm_intel_gem_bo_busy(drm_intel_bo *bo)
562 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
563 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
564 struct drm_i915_gem_busy busy;
568 busy.handle = bo_gem->gem_handle;
570 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
572 return (ret == 0 && busy.busy);
576 drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
577 drm_intel_bo_gem *bo_gem, int state)
579 struct drm_i915_gem_madvise madv;
582 madv.handle = bo_gem->gem_handle;
585 drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
587 return madv.retained;
591 drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
593 return drm_intel_gem_bo_madvise_internal
594 ((drm_intel_bufmgr_gem *) bo->bufmgr,
595 (drm_intel_bo_gem *) bo,
599 /* drop the oldest entries that have been purged by the kernel */
601 drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
602 struct drm_intel_gem_bo_bucket *bucket)
604 while (!DRMLISTEMPTY(&bucket->head)) {
605 drm_intel_bo_gem *bo_gem;
607 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
608 bucket->head.next, head);
609 if (drm_intel_gem_bo_madvise_internal
610 (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
613 DRMLISTDEL(&bo_gem->head);
614 drm_intel_gem_bo_free(&bo_gem->bo);
618 static drm_intel_bo *
619 drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
623 uint32_t tiling_mode,
624 unsigned long stride)
626 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
627 drm_intel_bo_gem *bo_gem;
628 unsigned int page_size = getpagesize();
630 struct drm_intel_gem_bo_bucket *bucket;
631 bool alloc_from_cache;
632 unsigned long bo_size;
633 bool for_render = false;
635 if (flags & BO_ALLOC_FOR_RENDER)
638 /* Round the allocated size up to a power of two number of pages. */
639 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
641 /* If we don't have caching at this size, don't actually round the
644 if (bucket == NULL) {
646 if (bo_size < page_size)
649 bo_size = bucket->size;
652 pthread_mutex_lock(&bufmgr_gem->lock);
653 /* Get a buffer out of the cache if available */
655 alloc_from_cache = false;
656 if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
658 /* Allocate new render-target BOs from the tail (MRU)
659 * of the list, as it will likely be hot in the GPU
660 * cache and in the aperture for us.
662 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
663 bucket->head.prev, head);
664 DRMLISTDEL(&bo_gem->head);
665 alloc_from_cache = true;
667 /* For non-render-target BOs (where we're probably
668 * going to map it first thing in order to fill it
669 * with data), check if the last BO in the cache is
670 * unbusy, and only reuse in that case. Otherwise,
671 * allocating a new buffer is probably faster than
672 * waiting for the GPU to finish.
674 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
675 bucket->head.next, head);
676 if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
677 alloc_from_cache = true;
678 DRMLISTDEL(&bo_gem->head);
682 if (alloc_from_cache) {
683 if (!drm_intel_gem_bo_madvise_internal
684 (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
685 drm_intel_gem_bo_free(&bo_gem->bo);
686 drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
691 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
694 drm_intel_gem_bo_free(&bo_gem->bo);
699 pthread_mutex_unlock(&bufmgr_gem->lock);
701 if (!alloc_from_cache) {
702 struct drm_i915_gem_create create;
704 bo_gem = calloc(1, sizeof(*bo_gem));
708 bo_gem->bo.size = bo_size;
711 create.size = bo_size;
713 ret = drmIoctl(bufmgr_gem->fd,
714 DRM_IOCTL_I915_GEM_CREATE,
716 bo_gem->gem_handle = create.handle;
717 bo_gem->bo.handle = bo_gem->gem_handle;
722 bo_gem->bo.bufmgr = bufmgr;
724 bo_gem->tiling_mode = I915_TILING_NONE;
725 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
728 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
731 drm_intel_gem_bo_free(&bo_gem->bo);
735 DRMINITLISTHEAD(&bo_gem->name_list);
736 DRMINITLISTHEAD(&bo_gem->vma_list);
740 atomic_set(&bo_gem->refcount, 1);
741 bo_gem->validate_index = -1;
742 bo_gem->reloc_tree_fences = 0;
743 bo_gem->used_as_reloc_target = false;
744 bo_gem->has_error = false;
745 bo_gem->reusable = true;
746 bo_gem->aub_annotations = NULL;
747 bo_gem->aub_annotation_count = 0;
749 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
751 DBG("bo_create: buf %d (%s) %ldb\n",
752 bo_gem->gem_handle, bo_gem->name, size);
757 static drm_intel_bo *
758 drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
761 unsigned int alignment)
763 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
765 I915_TILING_NONE, 0);
768 static drm_intel_bo *
769 drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
772 unsigned int alignment)
774 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
775 I915_TILING_NONE, 0);
778 static drm_intel_bo *
779 drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
780 int x, int y, int cpp, uint32_t *tiling_mode,
781 unsigned long *pitch, unsigned long flags)
783 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
784 unsigned long size, stride;
788 unsigned long aligned_y, height_alignment;
790 tiling = *tiling_mode;
792 /* If we're tiled, our allocations are in 8 or 32-row blocks,
793 * so failure to align our height means that we won't allocate
796 * If we're untiled, we still have to align to 2 rows high
797 * because the data port accesses 2x2 blocks even if the
798 * bottom row isn't to be rendered, so failure to align means
799 * we could walk off the end of the GTT and fault. This is
800 * documented on 965, and may be the case on older chipsets
801 * too so we try to be careful.
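 * For example (illustrative): with Y tiling the height alignment below
 * is 32 rows, so a request of y = 30 rows is padded to aligned_y = 32.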
804 height_alignment = 2;
806 if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
807 height_alignment = 16;
808 else if (tiling == I915_TILING_X
809 || (IS_915(bufmgr_gem->pci_device)
810 && tiling == I915_TILING_Y))
811 height_alignment = 8;
812 else if (tiling == I915_TILING_Y)
813 height_alignment = 32;
814 aligned_y = ALIGN(y, height_alignment);
817 stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
818 size = stride * aligned_y;
819 size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
820 } while (*tiling_mode != tiling);
823 if (tiling == I915_TILING_NONE)
826 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
831 * Returns a drm_intel_bo wrapping the given buffer object handle.
833 * This can be used when one application needs to pass a buffer object to another.
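 *
 * A minimal usage sketch (the variable names are illustrative only):
 *
 *   uint32_t flink_name = name_received_from_peer;
 *   drm_intel_bo *bo =
 *       drm_intel_bo_gem_create_from_name(bufmgr, "shared", flink_name);
 *   ... use bo, then drm_intel_bo_unreference(bo) when done ...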
837 drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
841 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
842 drm_intel_bo_gem *bo_gem;
844 struct drm_gem_open open_arg;
845 struct drm_i915_gem_get_tiling get_tiling;
848 /* At the moment most applications only have a few named bos.
849 * For instance, in a DRI client only the render buffers passed
850 * between X and the client are named. And since X returns the
851 * alternating names for the front/back buffer a linear search
852 * provides a sufficiently fast match.
854 for (list = bufmgr_gem->named.next;
855 list != &bufmgr_gem->named;
857 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
858 if (bo_gem->global_name == handle) {
859 drm_intel_gem_bo_reference(&bo_gem->bo);
864 bo_gem = calloc(1, sizeof(*bo_gem));
869 open_arg.name = handle;
870 ret = drmIoctl(bufmgr_gem->fd,
874 DBG("Couldn't reference %s handle 0x%08x: %s\n",
875 name, handle, strerror(errno));
879 bo_gem->bo.size = open_arg.size;
880 bo_gem->bo.offset = 0;
881 bo_gem->bo.virtual = NULL;
882 bo_gem->bo.bufmgr = bufmgr;
884 atomic_set(&bo_gem->refcount, 1);
885 bo_gem->validate_index = -1;
886 bo_gem->gem_handle = open_arg.handle;
887 bo_gem->bo.handle = open_arg.handle;
888 bo_gem->global_name = handle;
889 bo_gem->reusable = false;
891 VG_CLEAR(get_tiling);
892 get_tiling.handle = bo_gem->gem_handle;
893 ret = drmIoctl(bufmgr_gem->fd,
894 DRM_IOCTL_I915_GEM_GET_TILING,
897 drm_intel_gem_bo_unreference(&bo_gem->bo);
900 bo_gem->tiling_mode = get_tiling.tiling_mode;
901 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
902 /* XXX stride is unknown */
903 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
905 DRMINITLISTHEAD(&bo_gem->vma_list);
906 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
907 DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
913 drm_intel_gem_bo_free(drm_intel_bo *bo)
915 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
916 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
917 struct drm_gem_close close;
920 DRMLISTDEL(&bo_gem->vma_list);
921 if (bo_gem->mem_virtual) {
922 VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
923 munmap(bo_gem->mem_virtual, bo_gem->bo.size);
924 bufmgr_gem->vma_count--;
926 if (bo_gem->gtt_virtual) {
927 munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
928 bufmgr_gem->vma_count--;
931 /* Close this object */
933 close.handle = bo_gem->gem_handle;
934 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
936 DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
937 bo_gem->gem_handle, bo_gem->name, strerror(errno));
939 free(bo_gem->aub_annotations);
944 drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
947 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
949 if (bo_gem->mem_virtual)
950 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);
952 if (bo_gem->gtt_virtual)
953 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
957 /** Frees all cached buffers significantly older than @time. */
959 drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
963 if (bufmgr_gem->time == time)
966 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
967 struct drm_intel_gem_bo_bucket *bucket =
968 &bufmgr_gem->cache_bucket[i];
970 while (!DRMLISTEMPTY(&bucket->head)) {
971 drm_intel_bo_gem *bo_gem;
973 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
974 bucket->head.next, head);
975 if (time - bo_gem->free_time <= 1)
978 DRMLISTDEL(&bo_gem->head);
980 drm_intel_gem_bo_free(&bo_gem->bo);
984 bufmgr_gem->time = time;
987 static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
991 DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
992 bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);
994 if (bufmgr_gem->vma_max < 0)
997 /* We may need to evict a few entries in order to create new mmaps */
998 limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
1002 while (bufmgr_gem->vma_count > limit) {
1003 drm_intel_bo_gem *bo_gem;
1005 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1006 bufmgr_gem->vma_cache.next,
1008 assert(bo_gem->map_count == 0);
1009 DRMLISTDELINIT(&bo_gem->vma_list);
1011 if (bo_gem->mem_virtual) {
1012 munmap(bo_gem->mem_virtual, bo_gem->bo.size);
1013 bo_gem->mem_virtual = NULL;
1014 bufmgr_gem->vma_count--;
1016 if (bo_gem->gtt_virtual) {
1017 munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
1018 bo_gem->gtt_virtual = NULL;
1019 bufmgr_gem->vma_count--;
1024 static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1025 drm_intel_bo_gem *bo_gem)
1027 bufmgr_gem->vma_open--;
1028 DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
1029 if (bo_gem->mem_virtual)
1030 bufmgr_gem->vma_count++;
1031 if (bo_gem->gtt_virtual)
1032 bufmgr_gem->vma_count++;
1033 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
1036 static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1037 drm_intel_bo_gem *bo_gem)
1039 bufmgr_gem->vma_open++;
1040 DRMLISTDEL(&bo_gem->vma_list);
1041 if (bo_gem->mem_virtual)
1042 bufmgr_gem->vma_count--;
1043 if (bo_gem->gtt_virtual)
1044 bufmgr_gem->vma_count--;
1045 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
1049 drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
1051 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1052 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1053 struct drm_intel_gem_bo_bucket *bucket;
1056 /* Unreference all the target buffers */
1057 for (i = 0; i < bo_gem->reloc_count; i++) {
1058 if (bo_gem->reloc_target_info[i].bo != bo) {
1059 drm_intel_gem_bo_unreference_locked_timed(bo_gem->
1060 reloc_target_info[i].bo,
1064 bo_gem->reloc_count = 0;
1065 bo_gem->used_as_reloc_target = false;
1067 DBG("bo_unreference final: %d (%s)\n",
1068 bo_gem->gem_handle, bo_gem->name);
1070 /* release memory associated with this object */
1071 if (bo_gem->reloc_target_info) {
1072 free(bo_gem->reloc_target_info);
1073 bo_gem->reloc_target_info = NULL;
1075 if (bo_gem->relocs) {
1076 free(bo_gem->relocs);
1077 bo_gem->relocs = NULL;
1080 /* Clear any left-over mappings */
1081 if (bo_gem->map_count) {
1082 DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
1083 bo_gem->map_count = 0;
1084 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1085 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1088 DRMLISTDEL(&bo_gem->name_list);
1090 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
1091 /* Put the buffer into our internal cache for reuse if we can. */
1092 if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
1093 drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
1094 I915_MADV_DONTNEED)) {
1095 bo_gem->free_time = time;
1097 bo_gem->name = NULL;
1098 bo_gem->validate_index = -1;
1100 DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
1102 drm_intel_gem_bo_free(bo);
1106 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
1109 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1111 assert(atomic_read(&bo_gem->refcount) > 0);
1112 if (atomic_dec_and_test(&bo_gem->refcount))
1113 drm_intel_gem_bo_unreference_final(bo, time);
1116 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
1118 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1120 assert(atomic_read(&bo_gem->refcount) > 0);
1121 if (atomic_dec_and_test(&bo_gem->refcount)) {
1122 drm_intel_bufmgr_gem *bufmgr_gem =
1123 (drm_intel_bufmgr_gem *) bo->bufmgr;
1124 struct timespec time;
1126 clock_gettime(CLOCK_MONOTONIC, &time);
1128 pthread_mutex_lock(&bufmgr_gem->lock);
1129 drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
1130 drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
1131 pthread_mutex_unlock(&bufmgr_gem->lock);
1135 static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
1137 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1138 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1139 struct drm_i915_gem_set_domain set_domain;
1142 pthread_mutex_lock(&bufmgr_gem->lock);
1144 if (bo_gem->map_count++ == 0)
1145 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
1147 if (!bo_gem->mem_virtual) {
1148 struct drm_i915_gem_mmap mmap_arg;
1150 DBG("bo_map: %d (%s), map_count=%d\n",
1151 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1154 mmap_arg.handle = bo_gem->gem_handle;
1155 mmap_arg.offset = 0;
1156 mmap_arg.size = bo->size;
1157 ret = drmIoctl(bufmgr_gem->fd,
1158 DRM_IOCTL_I915_GEM_MMAP,
1162 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1163 __FILE__, __LINE__, bo_gem->gem_handle,
1164 bo_gem->name, strerror(errno));
1165 if (--bo_gem->map_count == 0)
1166 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1167 pthread_mutex_unlock(&bufmgr_gem->lock);
1170 VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
1171 bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
1173 DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1174 bo_gem->mem_virtual);
1175 bo->virtual = bo_gem->mem_virtual;
1177 VG_CLEAR(set_domain);
1178 set_domain.handle = bo_gem->gem_handle;
1179 set_domain.read_domains = I915_GEM_DOMAIN_CPU;
1181 set_domain.write_domain = I915_GEM_DOMAIN_CPU;
1183 set_domain.write_domain = 0;
1184 ret = drmIoctl(bufmgr_gem->fd,
1185 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1188 DBG("%s:%d: Error setting to CPU domain %d: %s\n",
1189 __FILE__, __LINE__, bo_gem->gem_handle,
1194 bo_gem->mapped_cpu_write = true;
1196 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1197 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
1198 pthread_mutex_unlock(&bufmgr_gem->lock);
1204 map_gtt(drm_intel_bo *bo)
1206 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1207 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1210 if (bo_gem->map_count++ == 0)
1211 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
1213 /* Get a mapping of the buffer if we haven't before. */
1214 if (bo_gem->gtt_virtual == NULL) {
1215 struct drm_i915_gem_mmap_gtt mmap_arg;
1217 DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
1218 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1221 mmap_arg.handle = bo_gem->gem_handle;
1223 /* Get the fake offset back... */
1224 ret = drmIoctl(bufmgr_gem->fd,
1225 DRM_IOCTL_I915_GEM_MMAP_GTT,
1229 DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
1231 bo_gem->gem_handle, bo_gem->name,
1233 if (--bo_gem->map_count == 0)
1234 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1239 bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
1240 MAP_SHARED, bufmgr_gem->fd,
1242 if (bo_gem->gtt_virtual == MAP_FAILED) {
1243 bo_gem->gtt_virtual = NULL;
1245 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1247 bo_gem->gem_handle, bo_gem->name,
1249 if (--bo_gem->map_count == 0)
1250 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1255 bo->virtual = bo_gem->gtt_virtual;
1257 DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1258 bo_gem->gtt_virtual);
1263 int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
1265 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1266 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1267 struct drm_i915_gem_set_domain set_domain;
1270 pthread_mutex_lock(&bufmgr_gem->lock);
1274 pthread_mutex_unlock(&bufmgr_gem->lock);
1278 /* Now move it to the GTT domain so that the GPU and CPU
1279 * caches are flushed and the GPU isn't actively using the
1282 * The pagefault handler does this domain change for us when
1283 * it has unbound the BO from the GTT, but it's up to us to
1284 * tell it when we're about to use things if we had done
1285 * rendering and it still happens to be bound to the GTT.
1287 VG_CLEAR(set_domain);
1288 set_domain.handle = bo_gem->gem_handle;
1289 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1290 set_domain.write_domain = I915_GEM_DOMAIN_GTT;
1291 ret = drmIoctl(bufmgr_gem->fd,
1292 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1295 DBG("%s:%d: Error setting domain %d: %s\n",
1296 __FILE__, __LINE__, bo_gem->gem_handle,
1300 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1301 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
1302 pthread_mutex_unlock(&bufmgr_gem->lock);
1308 * Performs a mapping of the buffer object like the normal GTT
1309 * mapping, but avoids waiting for the GPU to be done reading from or
1310 * rendering to the buffer.
1312 * This is used in the implementation of GL_ARB_map_buffer_range: The
1313 * user asks to create a buffer, then does a mapping, fills some
1314 * space, runs a drawing command, then asks to map it again without
1315 * synchronizing because it guarantees that it won't write over the
1316 * data that the GPU is busy using (or, more specifically, that if it
1317 * does write over the data, it acknowledges that rendering is
1321 int drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
1323 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1326 /* If the CPU cache isn't coherent with the GTT, then use a
1327 * regular synchronized mapping. The problem is that we don't
1328 * track where the buffer was last used on the CPU side in
1329 * terms of drm_intel_bo_map vs drm_intel_gem_bo_map_gtt, so
1330 * we would potentially corrupt the buffer even when the user
1331 * does reasonable things.
1333 if (!bufmgr_gem->has_llc)
1334 return drm_intel_gem_bo_map_gtt(bo);
1336 pthread_mutex_lock(&bufmgr_gem->lock);
1338 pthread_mutex_unlock(&bufmgr_gem->lock);
1343 static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
1345 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1346 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1352 pthread_mutex_lock(&bufmgr_gem->lock);
1354 if (bo_gem->map_count <= 0) {
1355 DBG("attempted to unmap an unmapped bo\n");
1356 pthread_mutex_unlock(&bufmgr_gem->lock);
1357 /* Preserve the old behaviour of just treating this as a
1358 * no-op rather than reporting the error.
1363 if (bo_gem->mapped_cpu_write) {
1364 struct drm_i915_gem_sw_finish sw_finish;
1366 /* Cause a flush to happen if the buffer's pinned for
1367 * scanout, so the results show up in a timely manner.
1368 * Unlike GTT set domains, this only does work if the
1369 * buffer should be scanout-related.
1371 VG_CLEAR(sw_finish);
1372 sw_finish.handle = bo_gem->gem_handle;
1373 ret = drmIoctl(bufmgr_gem->fd,
1374 DRM_IOCTL_I915_GEM_SW_FINISH,
1376 ret = ret == -1 ? -errno : 0;
1378 bo_gem->mapped_cpu_write = false;
1381 /* We need to unmap after every invocation as we cannot track
1382 * an open vma for every bo, as that will exhaust the system
1383 * limits and cause later failures.
1385 if (--bo_gem->map_count == 0) {
1386 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1387 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1390 pthread_mutex_unlock(&bufmgr_gem->lock);
1395 int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
1397 return drm_intel_gem_bo_unmap(bo);
1401 drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
1402 unsigned long size, const void *data)
1404 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1405 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1406 struct drm_i915_gem_pwrite pwrite;
1410 pwrite.handle = bo_gem->gem_handle;
1411 pwrite.offset = offset;
1413 pwrite.data_ptr = (uint64_t) (uintptr_t) data;
1414 ret = drmIoctl(bufmgr_gem->fd,
1415 DRM_IOCTL_I915_GEM_PWRITE,
1419 DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
1420 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1421 (int)size, strerror(errno));
1428 drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
1430 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1431 struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
1434 VG_CLEAR(get_pipe_from_crtc_id);
1435 get_pipe_from_crtc_id.crtc_id = crtc_id;
1436 ret = drmIoctl(bufmgr_gem->fd,
1437 DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
1438 &get_pipe_from_crtc_id);
1440 /* We return -1 here to signal that we don't
1441 * know which pipe is associated with this crtc.
1442 * This lets the caller know that this information
1443 * isn't available; using the wrong pipe for
1444 * vblank waiting can cause the chipset to lock up
1449 return get_pipe_from_crtc_id.pipe;
1453 drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
1454 unsigned long size, void *data)
1456 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1457 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1458 struct drm_i915_gem_pread pread;
1462 pread.handle = bo_gem->gem_handle;
1463 pread.offset = offset;
1465 pread.data_ptr = (uint64_t) (uintptr_t) data;
1466 ret = drmIoctl(bufmgr_gem->fd,
1467 DRM_IOCTL_I915_GEM_PREAD,
1471 DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
1472 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1473 (int)size, strerror(errno));
1479 /** Waits for all GPU rendering with the object to have completed. */
1481 drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
1483 drm_intel_gem_bo_start_gtt_access(bo, 1);
1487 * Waits on a BO for the given amount of time.
1489 * @bo: buffer object to wait for
1490 * @timeout_ns: amount of time to wait in nanoseconds.
1491 * If value is less than 0, an infinite wait will occur.
1493 * Returns 0 if the wait was successful, i.e. the last batch referencing the
1494 * object has completed within the allotted time. Otherwise some negative return
1495 * value describes the error. Of particular interest is -ETIME when the wait has
1496 * failed to yield the desired result.
1498 * Similar to drm_intel_gem_bo_wait_rendering except a timeout parameter allows
1499 * the operation to give up after a certain amount of time. Another subtle
1500 * difference is that the internal locking semantics differ (this variant does
1501 * not hold the lock for the duration of the wait). This makes the wait subject
1502 * to a larger userspace race window.
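 *
 * A minimal usage sketch (the one-second timeout is illustrative only):
 *
 *   int ret = drm_intel_gem_bo_wait(bo, 1000000000ll);
 *   ... ret == 0 means the object went idle within one second,
 *       ret == -ETIME means it is still busy, and other negative
 *       values indicate errors ...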
1504 * The implementation shall wait until the object is no longer actively
1505 * referenced within a batch buffer at the time of the call. The wait does
1506 * not guard against the buffer being re-issued via another thread, or via a flinked
1507 * handle. Userspace must make sure this race does not occur if such precision
1510 int drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
1512 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1513 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1514 struct drm_i915_gem_wait wait;
1517 if (!bufmgr_gem->has_wait_timeout) {
1518 DBG("%s:%d: Timed wait is not supported. Falling back to "
1519 "infinite wait\n", __FILE__, __LINE__);
1521 drm_intel_gem_bo_wait_rendering(bo);
1524 return drm_intel_gem_bo_busy(bo) ? -ETIME : 0;
1528 wait.bo_handle = bo_gem->gem_handle;
1529 wait.timeout_ns = timeout_ns;
1531 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
1539 * Sets the object to the GTT read and possibly write domain, used by the X
1540 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
1542 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
1543 * can do tiled pixmaps this way.
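 *
 * A minimal usage sketch (illustrative only): before CPU writes through an
 * existing GTT mapping of a scanout/tiled pixmap, a caller would do
 *
 *   drm_intel_gem_bo_start_gtt_access(bo, 1);
 *
 * and pass write_enable = 0 when it only intends to read.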
1546 drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
1548 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1549 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1550 struct drm_i915_gem_set_domain set_domain;
1553 VG_CLEAR(set_domain);
1554 set_domain.handle = bo_gem->gem_handle;
1555 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1556 set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
1557 ret = drmIoctl(bufmgr_gem->fd,
1558 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1561 DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
1562 __FILE__, __LINE__, bo_gem->gem_handle,
1563 set_domain.read_domains, set_domain.write_domain,
1569 drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
1571 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1574 free(bufmgr_gem->exec2_objects);
1575 free(bufmgr_gem->exec_objects);
1576 free(bufmgr_gem->exec_bos);
1578 pthread_mutex_destroy(&bufmgr_gem->lock);
1580 /* Free any cached buffer objects we were going to reuse */
1581 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
1582 struct drm_intel_gem_bo_bucket *bucket =
1583 &bufmgr_gem->cache_bucket[i];
1584 drm_intel_bo_gem *bo_gem;
1586 while (!DRMLISTEMPTY(&bucket->head)) {
1587 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1588 bucket->head.next, head);
1589 DRMLISTDEL(&bo_gem->head);
1591 drm_intel_gem_bo_free(&bo_gem->bo);
1599 * Adds the target buffer to the validation list and adds the relocation
1600 * to the reloc_buffer's relocation list.
1602 * The relocation entry at the given offset must already contain the
1603 * precomputed relocation value, because the kernel will optimize out
1604 * the relocation entry write when the buffer hasn't moved from the
1605 * last known offset in target_bo.
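 *
 * A minimal caller-side sketch using the public wrapper (batch_bo, i and
 * the domains are illustrative only); the batch dword at byte offset i*4
 * is assumed to already hold target_bo->offset + 0:
 *
 *   drm_intel_bo_emit_reloc(batch_bo, i * 4, target_bo, 0,
 *                           I915_GEM_DOMAIN_RENDER,
 *                           I915_GEM_DOMAIN_RENDER);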
1608 do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1609 drm_intel_bo *target_bo, uint32_t target_offset,
1610 uint32_t read_domains, uint32_t write_domain,
1613 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1614 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1615 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
1616 bool fenced_command;
1618 if (bo_gem->has_error)
1621 if (target_bo_gem->has_error) {
1622 bo_gem->has_error = true;
1626 /* We never use HW fences for rendering on 965+ */
1627 if (bufmgr_gem->gen >= 4)
1630 fenced_command = need_fence;
1631 if (target_bo_gem->tiling_mode == I915_TILING_NONE)
1634 /* Create a new relocation list if needed */
1635 if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
1638 /* Check overflow */
1639 assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
1642 assert(offset <= bo->size - 4);
1643 assert((write_domain & (write_domain - 1)) == 0);
1645 /* Make sure that we're not adding a reloc to something whose size has
1646 * already been accounted for.
1648 assert(!bo_gem->used_as_reloc_target);
1649 if (target_bo_gem != bo_gem) {
1650 target_bo_gem->used_as_reloc_target = true;
1651 bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
1653 /* An object needing a fence is a tiled buffer, so it won't have
1654 * relocs to other buffers.
1657 target_bo_gem->reloc_tree_fences = 1;
1658 bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
1660 bo_gem->relocs[bo_gem->reloc_count].offset = offset;
1661 bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
1662 bo_gem->relocs[bo_gem->reloc_count].target_handle =
1663 target_bo_gem->gem_handle;
1664 bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
1665 bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
1666 bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
1668 bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
1669 if (target_bo != bo)
1670 drm_intel_gem_bo_reference(target_bo);
1672 bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
1673 DRM_INTEL_RELOC_FENCE;
1675 bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
1677 bo_gem->reloc_count++;
1683 drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1684 drm_intel_bo *target_bo, uint32_t target_offset,
1685 uint32_t read_domains, uint32_t write_domain)
1687 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
1689 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1690 read_domains, write_domain,
1691 !bufmgr_gem->fenced_relocs);
1695 drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
1696 drm_intel_bo *target_bo,
1697 uint32_t target_offset,
1698 uint32_t read_domains, uint32_t write_domain)
1700 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1701 read_domains, write_domain, true);
1705 drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
1707 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1709 return bo_gem->reloc_count;
1713 * Removes existing relocation entries in the BO after "start".
1715 * This allows a user to avoid a two-step process for state setup with
1716 * counting up all the buffer objects and doing a
1717 * drm_intel_bufmgr_check_aperture_space() before emitting any of the
1718 * relocations for the state setup. Instead, save the state of the
1719 * batchbuffer including drm_intel_gem_bo_get_reloc_count(), emit all the
1720 * state, and then check if it still fits in the aperture.
1722 * Any further drm_intel_bufmgr_check_aperture_space() queries
1723 * involving this buffer in the tree are undefined after this call.
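 *
 * A minimal usage sketch (illustrative only; batch_bo names the batch
 * buffer here):
 *
 *   int start = drm_intel_gem_bo_get_reloc_count(batch_bo);
 *   ... emit state and relocations into batch_bo ...
 *   if (drm_intel_bufmgr_check_aperture_space(&batch_bo, 1) != 0)
 *       drm_intel_gem_bo_clear_relocs(batch_bo, start);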
1726 drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
1728 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1730 struct timespec time;
1732 clock_gettime(CLOCK_MONOTONIC, &time);
1734 assert(bo_gem->reloc_count >= start);
1735 /* Unreference the cleared target buffers */
1736 for (i = start; i < bo_gem->reloc_count; i++) {
1737 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->reloc_target_info[i].bo;
1738 if (&target_bo_gem->bo != bo) {
1739 bo_gem->reloc_tree_fences -= target_bo_gem->reloc_tree_fences;
1740 drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo,
1744 bo_gem->reloc_count = start;
1748 * Walk the tree of relocations rooted at BO and accumulate the list of
1749 * validations to be performed and update the relocation buffers with
1750 * index values into the validation list.
1753 drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
1755 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1758 if (bo_gem->relocs == NULL)
1761 for (i = 0; i < bo_gem->reloc_count; i++) {
1762 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
1764 if (target_bo == bo)
1767 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1769 /* Continue walking the tree depth-first. */
1770 drm_intel_gem_bo_process_reloc(target_bo);
1772 /* Add the target to the validate list */
1773 drm_intel_add_validate_buffer(target_bo);
1778 drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
1780 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1783 if (bo_gem->relocs == NULL)
1786 for (i = 0; i < bo_gem->reloc_count; i++) {
1787 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
1790 if (target_bo == bo)
1793 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1795 /* Continue walking the tree depth-first. */
1796 drm_intel_gem_bo_process_reloc2(target_bo);
1798 need_fence = (bo_gem->reloc_target_info[i].flags &
1799 DRM_INTEL_RELOC_FENCE);
1801 /* Add the target to the validate list */
1802 drm_intel_add_validate_buffer2(target_bo, need_fence);
1808 drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
1812 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1813 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1814 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1816 /* Update the buffer offset */
1817 if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
1818 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1819 bo_gem->gem_handle, bo_gem->name, bo->offset,
1820 (unsigned long long)bufmgr_gem->exec_objects[i].
1822 bo->offset = bufmgr_gem->exec_objects[i].offset;
1828 drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
1832 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1833 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1834 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1836 /* Update the buffer offset */
1837 if (bufmgr_gem->exec2_objects[i].offset != bo->offset) {
1838 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1839 bo_gem->gem_handle, bo_gem->name, bo->offset,
1840 (unsigned long long)bufmgr_gem->exec2_objects[i].offset);
1841 bo->offset = bufmgr_gem->exec2_objects[i].offset;
1847 aub_out(drm_intel_bufmgr_gem *bufmgr_gem, uint32_t data)
1849 fwrite(&data, 1, 4, bufmgr_gem->aub_file);
1853 aub_out_data(drm_intel_bufmgr_gem *bufmgr_gem, void *data, size_t size)
1855 fwrite(data, 1, size, bufmgr_gem->aub_file);
1859 aub_write_bo_data(drm_intel_bo *bo, uint32_t offset, uint32_t size)
1861 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1862 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1866 data = malloc(bo->size);
1867 drm_intel_bo_get_subdata(bo, offset, size, data);
1869 /* Easy mode: write out bo with no relocations */
1870 if (!bo_gem->reloc_count) {
1871 aub_out_data(bufmgr_gem, data, size);
1876 /* Otherwise, handle the relocations while writing. */
1877 for (i = 0; i < size / 4; i++) {
1879 for (r = 0; r < bo_gem->reloc_count; r++) {
1880 struct drm_i915_gem_relocation_entry *reloc;
1881 drm_intel_reloc_target *info;
1883 reloc = &bo_gem->relocs[r];
1884 info = &bo_gem->reloc_target_info[r];
1886 if (reloc->offset == offset + i * 4) {
1887 drm_intel_bo_gem *target_gem;
1890 target_gem = (drm_intel_bo_gem *)info->bo;
1893 val += target_gem->aub_offset;
1895 aub_out(bufmgr_gem, val);
1900 if (r == bo_gem->reloc_count) {
1901 /* no relocation, just the data */
1902 aub_out(bufmgr_gem, data[i]);
1910 aub_bo_get_address(drm_intel_bo *bo)
1912 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1913 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1915 /* Give the object a graphics address in the AUB file. We
1916 * don't just use the GEM object address because we do AUB
1917 * dumping before execution -- we want to successfully log
1918 * when the hardware might hang, and we might even want to aub
1919 * capture for a driver trying to execute on a different
1920 * generation of hardware by disabling the actual kernel exec
1923 bo_gem->aub_offset = bufmgr_gem->aub_offset;
1924 bufmgr_gem->aub_offset += bo->size;
1925 /* XXX: Handle aperture overflow. */
1926 assert(bufmgr_gem->aub_offset < 256 * 1024 * 1024);
1930 aub_write_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype,
1931 uint32_t offset, uint32_t size)
1933 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1934 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1937 CMD_AUB_TRACE_HEADER_BLOCK |
1940 AUB_TRACE_MEMTYPE_GTT | type | AUB_TRACE_OP_DATA_WRITE);
1941 aub_out(bufmgr_gem, subtype);
1942 aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
1943 aub_out(bufmgr_gem, size);
1944 aub_write_bo_data(bo, offset, size);
1948 * Break up large objects into multiple writes. Otherwise a 128 KiB VBO
1949 * would overflow the 16-bit size field in the packet header and
1950 * everything goes badly after that.
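 * For example (illustrative arithmetic): a 128 KiB buffer is emitted as
 * four 32 KiB (8 * 4096 byte) trace blocks, matching the cap below.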
1953 aub_write_large_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype,
1954 uint32_t offset, uint32_t size)
1956 uint32_t block_size;
1957 uint32_t sub_offset;
1959 for (sub_offset = 0; sub_offset < size; sub_offset += block_size) {
1960 block_size = size - sub_offset;
1962 if (block_size > 8 * 4096)
1963 block_size = 8 * 4096;
1965 aub_write_trace_block(bo, type, subtype, offset + sub_offset,
1971 aub_write_bo(drm_intel_bo *bo)
1973 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1974 uint32_t offset = 0;
1977 aub_bo_get_address(bo);
1979 /* Write out each annotated section separately. */
1980 for (i = 0; i < bo_gem->aub_annotation_count; ++i) {
1981 drm_intel_aub_annotation *annotation =
1982 &bo_gem->aub_annotations[i];
1983 uint32_t ending_offset = annotation->ending_offset;
1984 if (ending_offset > bo->size)
1985 ending_offset = bo->size;
1986 if (ending_offset > offset) {
1987 aub_write_large_trace_block(bo, annotation->type,
1988 annotation->subtype,
1990 ending_offset - offset);
1991 offset = ending_offset;
1995 /* Write out any remaining unannotated data */
1996 if (offset < bo->size) {
1997 aub_write_large_trace_block(bo, AUB_TRACE_TYPE_NOTYPE, 0,
1998 offset, bo->size - offset);
2003 * Make a ringbuffer on the fly and dump it
2006 aub_build_dump_ringbuffer(drm_intel_bufmgr_gem *bufmgr_gem,
2007 uint32_t batch_buffer, int ring_flag)
2009 uint32_t ringbuffer[4096];
2010 int ring = AUB_TRACE_TYPE_RING_PRB0; /* The default ring */
2013 if (ring_flag == I915_EXEC_BSD)
2014 ring = AUB_TRACE_TYPE_RING_PRB1;
2015 else if (ring_flag == I915_EXEC_BLT)
2016 ring = AUB_TRACE_TYPE_RING_PRB2;
2018 /* Make a ring buffer to execute our batchbuffer. */
2019 memset(ringbuffer, 0, sizeof(ringbuffer));
2020 ringbuffer[ring_count++] = AUB_MI_BATCH_BUFFER_START;
2021 ringbuffer[ring_count++] = batch_buffer;
2023 /* Write out the ring. This appears to trigger execution of
2024 * the ring in the simulator.
2027 CMD_AUB_TRACE_HEADER_BLOCK |
2030 AUB_TRACE_MEMTYPE_GTT | ring | AUB_TRACE_OP_COMMAND_WRITE);
2031 aub_out(bufmgr_gem, 0); /* general/surface subtype */
2032 aub_out(bufmgr_gem, bufmgr_gem->aub_offset);
2033 aub_out(bufmgr_gem, ring_count * 4);
2035 /* FIXME: Need some flush operations here? */
2036 aub_out_data(bufmgr_gem, ringbuffer, ring_count * 4);
2038 /* Update offset pointer */
2039 bufmgr_gem->aub_offset += 4096;
2043 drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
2044 int x1, int y1, int width, int height,
2045 enum aub_dump_bmp_format format,
2046 int pitch, int offset)
2048 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2049 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
2053 case AUB_DUMP_BMP_FORMAT_8BIT:
2056 case AUB_DUMP_BMP_FORMAT_ARGB_4444:
2059 case AUB_DUMP_BMP_FORMAT_ARGB_0888:
2060 case AUB_DUMP_BMP_FORMAT_ARGB_8888:
2064 printf("Unknown AUB dump format %d\n", format);
2068 if (!bufmgr_gem->aub_file)
2071 aub_out(bufmgr_gem, CMD_AUB_DUMP_BMP | 4);
2072 aub_out(bufmgr_gem, (y1 << 16) | x1);
2077 aub_out(bufmgr_gem, (height << 16) | width);
2078 aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
2080 ((bo_gem->tiling_mode != I915_TILING_NONE) ? (1 << 2) : 0) |
2081 ((bo_gem->tiling_mode == I915_TILING_Y) ? (1 << 3) : 0));
2085 aub_exec(drm_intel_bo *bo, int ring_flag, int used)
2087 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2088 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2090 bool batch_buffer_needs_annotations;
2092 if (!bufmgr_gem->aub_file)
2095 /* If batch buffer is not annotated, annotate it the best we
2098 batch_buffer_needs_annotations = bo_gem->aub_annotation_count == 0;
2099 if (batch_buffer_needs_annotations) {
2100 drm_intel_aub_annotation annotations[2] = {
2101 { AUB_TRACE_TYPE_BATCH, 0, used },
2102 { AUB_TRACE_TYPE_NOTYPE, 0, bo->size }
2104 drm_intel_bufmgr_gem_set_aub_annotations(bo, annotations, 2);
2107 /* Write out all buffers to AUB memory */
2108 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2109 aub_write_bo(bufmgr_gem->exec_bos[i]);
2112 /* Remove any annotations we added */
2113 if (batch_buffer_needs_annotations)
2114 drm_intel_bufmgr_gem_set_aub_annotations(bo, NULL, 0);
2116 /* Dump ring buffer */
2117 aub_build_dump_ringbuffer(bufmgr_gem, bo_gem->aub_offset, ring_flag);
2119 fflush(bufmgr_gem->aub_file);
2122 * One frame has been dumped, so reset the aub_offset for the next frame.
2124 * FIXME: Can we do this?
2126 bufmgr_gem->aub_offset = 0x10000;
2130 drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
2131 drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
2133 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2134 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2135 struct drm_i915_gem_execbuffer execbuf;
2138 if (bo_gem->has_error)
2141 pthread_mutex_lock(&bufmgr_gem->lock);
2142 /* Update indices and set up the validate list. */
2143 drm_intel_gem_bo_process_reloc(bo);
2145 /* Add the batch buffer to the validation list. There are no
2146 * relocations pointing to it.
2148 drm_intel_add_validate_buffer(bo);
2151 execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
2152 execbuf.buffer_count = bufmgr_gem->exec_count;
2153 execbuf.batch_start_offset = 0;
2154 execbuf.batch_len = used;
2155 execbuf.cliprects_ptr = (uintptr_t) cliprects;
2156 execbuf.num_cliprects = num_cliprects;
2160 ret = drmIoctl(bufmgr_gem->fd,
2161 DRM_IOCTL_I915_GEM_EXECBUFFER,
2165 if (errno == ENOSPC) {
2166 DBG("Execbuffer fails to pin. "
2167 "Estimate: %u. Actual: %u. Available: %u\n",
2168 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
2171 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
2174 (unsigned int)bufmgr_gem->gtt_size);
2177 drm_intel_update_buffer_offsets(bufmgr_gem);
2179 if (bufmgr_gem->bufmgr.debug)
2180 drm_intel_gem_dump_validation_list(bufmgr_gem);
2182 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2183 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2184 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2186 /* Disconnect the buffer from the validate list */
2187 bo_gem->validate_index = -1;
2188 bufmgr_gem->exec_bos[i] = NULL;
2190 bufmgr_gem->exec_count = 0;
2191 pthread_mutex_unlock(&bufmgr_gem->lock);
2197 do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
2198 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
2201 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
2202 struct drm_i915_gem_execbuffer2 execbuf;
2206 switch (flags & 0x7) {
2210 if (!bufmgr_gem->has_blt)
2214 if (!bufmgr_gem->has_bsd)
2217 case I915_EXEC_VEBOX:
2218 if (!bufmgr_gem->has_vebox)
2221 case I915_EXEC_RENDER:
2222 case I915_EXEC_DEFAULT:
2226 pthread_mutex_lock(&bufmgr_gem->lock);
2227 /* Update indices and set up the validate list. */
2228 drm_intel_gem_bo_process_reloc2(bo);
2230 /* Add the batch buffer to the validation list. There are no relocations
2233 drm_intel_add_validate_buffer2(bo, 0);
2236 execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
2237 execbuf.buffer_count = bufmgr_gem->exec_count;
2238 execbuf.batch_start_offset = 0;
2239 execbuf.batch_len = used;
2240 execbuf.cliprects_ptr = (uintptr_t)cliprects;
2241 execbuf.num_cliprects = num_cliprects;
2244 execbuf.flags = flags;
2246 i915_execbuffer2_set_context_id(execbuf, 0);
2248 i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id);
2251 aub_exec(bo, flags, used);
2253 if (bufmgr_gem->no_exec)
2254 goto skip_execution;
2256 ret = drmIoctl(bufmgr_gem->fd,
2257 DRM_IOCTL_I915_GEM_EXECBUFFER2,
2261 if (ret == -ENOSPC) {
2262 DBG("Execbuffer fails to pin. "
2263 "Estimate: %u. Actual: %u. Available: %u\n",
2264 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
2265 bufmgr_gem->exec_count),
2266 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
2267 bufmgr_gem->exec_count),
2268 (unsigned int) bufmgr_gem->gtt_size);
2271 drm_intel_update_buffer_offsets2(bufmgr_gem);
2274 if (bufmgr_gem->bufmgr.debug)
2275 drm_intel_gem_dump_validation_list(bufmgr_gem);
2277 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2278 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2279 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
2281 /* Disconnect the buffer from the validate list */
2282 bo_gem->validate_index = -1;
2283 bufmgr_gem->exec_bos[i] = NULL;
2285 bufmgr_gem->exec_count = 0;
2286 pthread_mutex_unlock(&bufmgr_gem->lock);
2292 drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
2293 drm_clip_rect_t *cliprects, int num_cliprects,
2296 return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
2301 drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
2302 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
2305 return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
2310 drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
2311 int used, unsigned int flags)
2313 return do_exec2(bo, used, ctx, NULL, 0, 0, flags);
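/*
 * Illustrative usage sketch (an addition for clarity, not part of the
 * original file): a caller that has filled a batch buffer with commands
 * submits it through one of the wrappers above.  "batch_bo", "used" and
 * the choice of ring flag are assumed to come from the caller.
 *
 *     drm_intel_context *ctx = drm_intel_gem_context_create(bufmgr);
 *     ...emit commands into batch_bo, recording "used" bytes...
 *     if (ctx)
 *             drm_intel_gem_bo_context_exec(batch_bo, ctx, used,
 *                                           I915_EXEC_RENDER);
 *     else
 *             drm_intel_gem_bo_mrb_exec2(batch_bo, used, NULL, 0, 0,
 *                                        I915_EXEC_RENDER);
 */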
2317 drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
2319 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2320 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2321 struct drm_i915_gem_pin pin;
2325 pin.handle = bo_gem->gem_handle;
2326 pin.alignment = alignment;
2328 ret = drmIoctl(bufmgr_gem->fd,
2329 DRM_IOCTL_I915_GEM_PIN,
2334 bo->offset = pin.offset;
2339 drm_intel_gem_bo_unpin(drm_intel_bo *bo)
2341 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2342 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2343 struct drm_i915_gem_unpin unpin;
2347 unpin.handle = bo_gem->gem_handle;
2349 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
2357 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
2358 uint32_t tiling_mode,
2361 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2362 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2363 struct drm_i915_gem_set_tiling set_tiling;
2366 if (bo_gem->global_name == 0 &&
2367 tiling_mode == bo_gem->tiling_mode &&
2368 stride == bo_gem->stride)
2371 memset(&set_tiling, 0, sizeof(set_tiling));
2373 /* set_tiling is slightly broken and overwrites the
2374 * input on the error path, so we have to open code
2377 set_tiling.handle = bo_gem->gem_handle;
2378 set_tiling.tiling_mode = tiling_mode;
2379 set_tiling.stride = stride;
2381 ret = ioctl(bufmgr_gem->fd,
2382 DRM_IOCTL_I915_GEM_SET_TILING,
2384 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
2388 bo_gem->tiling_mode = set_tiling.tiling_mode;
2389 bo_gem->swizzle_mode = set_tiling.swizzle_mode;
2390 bo_gem->stride = set_tiling.stride;
2395 drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
2398 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2399 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2402 /* Linear buffers have no stride. By ensuring that we only ever use
2403 * stride 0 with linear buffers, we simplify our code.
2405 if (*tiling_mode == I915_TILING_NONE)
2408 ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
2410 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
2412 *tiling_mode = bo_gem->tiling_mode;
2417 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
2418 uint32_t * swizzle_mode)
2420 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2422 *tiling_mode = bo_gem->tiling_mode;
2423 *swizzle_mode = bo_gem->swizzle_mode;
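/*
 * Illustrative usage sketch (an addition, not part of the original file):
 * request X tiling on a bo and read back what the kernel actually applied,
 * since set_tiling may leave the buffer linear.  "bo" and "pitch" are
 * assumed to be supplied by the caller.
 *
 *     uint32_t tiling = I915_TILING_X, swizzle;
 *     drm_intel_bo_set_tiling(bo, &tiling, pitch);
 *     drm_intel_bo_get_tiling(bo, &tiling, &swizzle);
 *     if (tiling != I915_TILING_X)
 *             ...fall back to a linear layout...
 */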
2428 drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
2430 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
2433 drm_intel_bo_gem *bo_gem;
2434 struct drm_i915_gem_get_tiling get_tiling;
2436 ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
2438 fprintf(stderr, "drmPrimeFDToHandle failed: %d (errno %d)\n", ret, errno);
2442 bo_gem = calloc(1, sizeof(*bo_gem));
2446 bo_gem->bo.size = size;
2447 bo_gem->bo.handle = handle;
2448 bo_gem->bo.bufmgr = bufmgr;
2450 bo_gem->gem_handle = handle;
2452 atomic_set(&bo_gem->refcount, 1);
2454 bo_gem->name = "prime";
2455 bo_gem->validate_index = -1;
2456 bo_gem->reloc_tree_fences = 0;
2457 bo_gem->used_as_reloc_target = false;
2458 bo_gem->has_error = false;
2459 bo_gem->reusable = false;
2461 DRMINITLISTHEAD(&bo_gem->name_list);
2462 DRMINITLISTHEAD(&bo_gem->vma_list);
2464 VG_CLEAR(get_tiling);
2465 get_tiling.handle = bo_gem->gem_handle;
2466 ret = drmIoctl(bufmgr_gem->fd,
2467 DRM_IOCTL_I915_GEM_GET_TILING,
2470 drm_intel_gem_bo_unreference(&bo_gem->bo);
2473 bo_gem->tiling_mode = get_tiling.tiling_mode;
2474 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
2475 /* XXX stride is unknown */
2476 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
2482 drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
2484 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2485 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2487 if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle,
2488 DRM_CLOEXEC, prime_fd) != 0)
2491 bo_gem->reusable = false;
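/*
 * Illustrative sharing sketch (an addition, not part of the original file):
 * export a bo as a dma-buf fd and re-import it through another buffer
 * manager.  "bo" and "other_bufmgr" are assumed to be caller-supplied.
 *
 *     int fd;
 *     if (drm_intel_bo_gem_export_to_prime(bo, &fd) == 0) {
 *             drm_intel_bo *imported =
 *                     drm_intel_bo_gem_create_from_prime(other_bufmgr,
 *                                                        fd, bo->size);
 *             close(fd);
 *             ...use "imported", then drm_intel_bo_unreference(imported)...
 *     }
 */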
2497 drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
2499 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2500 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2503 if (!bo_gem->global_name) {
2504 struct drm_gem_flink flink;
2507 flink.handle = bo_gem->gem_handle;
2509 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
2513 bo_gem->global_name = flink.name;
2514 bo_gem->reusable = false;
2516 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
2519 *name = bo_gem->global_name;
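/*
 * Illustrative flink sketch (an addition, not part of the original file):
 * the global name obtained here can be handed to another process, which
 * reopens the object by name.
 *
 *     uint32_t name;
 *     if (drm_intel_bo_flink(bo, &name) == 0)
 *             ...pass "name" to the other process...
 *
 *     In the receiving process:
 *     drm_intel_bo *shared =
 *             drm_intel_bo_gem_create_from_name(bufmgr, "shared", name);
 */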
2524 * Enables unlimited caching of buffer objects for reuse.
2526 * This is potentially very memory expensive, as the cache at each bucket
2527 * size is only bounded by how many buffers of that size we've managed to have
2528 * in flight at once.
2531 drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
2533 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
2535 bufmgr_gem->bo_reuse = true;
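/*
 * Illustrative sketch (an addition, not part of the original file): reuse
 * is normally enabled right after creating the buffer manager, before any
 * buffers are allocated.
 *
 *     drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
 *     drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 */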
2539 * Enable use of fenced reloc type.
2541 * New code should enable this to avoid unnecessary fence register
2542 * allocation. If this option is not enabled, all relocs will have a
2543 * fence register allocated.
2546 drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
2548 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2550 if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
2551 bufmgr_gem->fenced_relocs = true;
2555 * Return the additional aperture space required by the tree of buffer objects
2559 drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
2561 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2565 if (bo == NULL || bo_gem->included_in_check_aperture)
2569 bo_gem->included_in_check_aperture = true;
2571 for (i = 0; i < bo_gem->reloc_count; i++)
2573 drm_intel_gem_bo_get_aperture_space(bo_gem->
2574 reloc_target_info[i].bo);
2580 * Count the number of buffers in this list that need a fence reg
2582 * If the count is greater than the number of available regs, we'll have
2583 * to ask the caller to resubmit a batch with fewer tiled buffers.
2585 * This function over-counts if the same buffer is used multiple times.
2588 drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
2591 unsigned int total = 0;
2593 for (i = 0; i < count; i++) {
2594 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
2599 total += bo_gem->reloc_tree_fences;
2605 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
2606 * for the next drm_intel_bufmgr_check_aperture_space() call.
2609 drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
2611 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2614 if (bo == NULL || !bo_gem->included_in_check_aperture)
2617 bo_gem->included_in_check_aperture = false;
2619 for (i = 0; i < bo_gem->reloc_count; i++)
2620 drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
2621 reloc_target_info[i].bo);
2625 * Return a conservative estimate for the amount of aperture required
2626 * for a collection of buffers. This may double-count some buffers.
2629 drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
2632 unsigned int total = 0;
2634 for (i = 0; i < count; i++) {
2635 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
2637 total += bo_gem->reloc_tree_size;
2643 * Return the amount of aperture needed for a collection of buffers.
2644 * This avoids double counting any buffers, at the cost of looking
2645 * at every buffer in the set.
2648 drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
2651 unsigned int total = 0;
2653 for (i = 0; i < count; i++) {
2654 total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
2655 /* For the first buffer object in the array, we get an
2656 * accurate count back for its reloc_tree size (since nothing
2657 * had been flagged as being counted yet). We can save that
2658 * value out as a more conservative reloc_tree_size that
2659 * avoids double-counting target buffers. Since the first
2660 * buffer happens to usually be the batch buffer in our
2661 * callers, this can pull us back from doing the tree
2662 * walk on every new batch emit.
2665 drm_intel_bo_gem *bo_gem =
2666 (drm_intel_bo_gem *) bo_array[i];
2667 bo_gem->reloc_tree_size = total;
2671 for (i = 0; i < count; i++)
2672 drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
2677 * Return -1 if the batchbuffer should be flushed before attempting to
2678 * emit rendering referencing the buffers pointed to by bo_array.
2680 * This is required because if we try to emit a batchbuffer with relocations
2681 * to a tree of buffers that won't simultaneously fit in the aperture,
2682 * the rendering will return an error at a point where the software is not
2683 * prepared to recover from it.
2685 * However, we also want to emit the batchbuffer significantly before we reach
2686 * the limit, as a series of batchbuffers each of which references buffers
2687 * covering almost all of the aperture means that at each emit we end up
2688 * waiting to evict a buffer from the last rendering, and we get synchronous
2689 * performance. By emitting smaller batchbuffers, we eat some CPU overhead to
2690 * get better parallelism.
2693 drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
2695 drm_intel_bufmgr_gem *bufmgr_gem =
2696 (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
2697 unsigned int total = 0;
2698 unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
2701 /* Check for fence reg constraints if necessary */
2702 if (bufmgr_gem->available_fences) {
2703 total_fences = drm_intel_gem_total_fences(bo_array, count);
2704 if (total_fences > bufmgr_gem->available_fences)
2708 total = drm_intel_gem_estimate_batch_space(bo_array, count);
2710 if (total > threshold)
2711 total = drm_intel_gem_compute_batch_space(bo_array, count);
2713 if (total > threshold) {
2714 DBG("check_space: overflowed available aperture, "
2716 total / 1024, (int)bufmgr_gem->gtt_size / 1024);
2719 DBG("drm_check_space: total %dkb vs bufmgr %dkb\n", total / 1024,
2720 (int)bufmgr_gem->gtt_size / 1024);
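/*
 * Illustrative sketch (an addition, not part of the original file): callers
 * typically check the whole set of buffers a batch will reference and flush
 * early when an aperture overflow is reported.
 *
 *     drm_intel_bo *bos[2] = { batch_bo, target_bo };
 *     if (drm_intel_bufmgr_check_aperture_space(bos, 2) != 0)
 *             ...flush the current batch and start a new one...
 */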
2726 * Disable buffer reuse for objects which are shared with the kernel
2727 * as scanout buffers
2730 drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
2732 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2734 bo_gem->reusable = false;
2739 drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
2741 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2743 return bo_gem->reusable;
2747 _drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
2749 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2752 for (i = 0; i < bo_gem->reloc_count; i++) {
2753 if (bo_gem->reloc_target_info[i].bo == target_bo)
2755 if (bo == bo_gem->reloc_target_info[i].bo)
2757 if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
2765 /** Return true if target_bo is referenced by bo's relocation tree. */
2767 drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
2769 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
2771 if (bo == NULL || target_bo == NULL)
2773 if (target_bo_gem->used_as_reloc_target)
2774 return _drm_intel_gem_bo_references(bo, target_bo);
2779 add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
2781 unsigned int i = bufmgr_gem->num_buckets;
2783 assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));
2785 DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
2786 bufmgr_gem->cache_bucket[i].size = size;
2787 bufmgr_gem->num_buckets++;
2791 init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
2793 unsigned long size, cache_max_size = 64 * 1024 * 1024;
2795 /* OK, so power of two buckets was too wasteful of memory.
2796 * Give 3 other sizes between each power of two, to hopefully
2797 * cover things accurately enough. (The alternative is
2798 * probably to just go for exact matching of sizes, and assume
2799 * that for things like composited window resize the tiled
2800 * width/height alignment and rounding of sizes to pages will
2801 * get us useful cache hit rates anyway)
2803 add_bucket(bufmgr_gem, 4096);
2804 add_bucket(bufmgr_gem, 4096 * 2);
2805 add_bucket(bufmgr_gem, 4096 * 3);
2807 /* Initialize the linked lists for BO reuse cache. */
2808 for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
2809 add_bucket(bufmgr_gem, size);
2811 add_bucket(bufmgr_gem, size + size * 1 / 4);
2812 add_bucket(bufmgr_gem, size + size * 2 / 4);
2813 add_bucket(bufmgr_gem, size + size * 3 / 4);
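/*
 * For illustration, the calls and loop above produce bucket sizes of
 * 4KB, 8KB, 12KB, and then for each power of two p from 16KB up to 64MB
 * the four sizes p, p + p/4, p + p/2 and p + 3p/4:
 * 16KB, 20KB, 24KB, 28KB, 32KB, 40KB, 48KB, 56KB, 64KB, 80KB, ...
 */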
2818 drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
2820 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2822 bufmgr_gem->vma_max = limit;
2824 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
2828 * Get the PCI ID for the device. This can be overridden by setting the
2829 * INTEL_DEVID_OVERRIDE environment variable to the desired ID.
2832 get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
2834 char *devid_override;
2837 drm_i915_getparam_t gp;
2839 if (geteuid() == getuid()) {
2840 devid_override = getenv("INTEL_DEVID_OVERRIDE");
2841 if (devid_override) {
2842 bufmgr_gem->no_exec = true;
2843 return strtol(devid_override, NULL, 0);
2849 gp.param = I915_PARAM_CHIPSET_ID;
2851 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2853 fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
2854 fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
2860 drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
2862 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2864 return bufmgr_gem->pci_device;
2868 * Sets up AUB dumping.
2870 * This is a trace file format that can be used with the simulator.
2871 * Packets are emitted in a format somewhat like GPU command packets.
2872 * You can set up a GTT and upload your objects into the referenced
2873 * space, then send off batchbuffers and get BMPs out the other end.
2876 drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
2878 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2879 int entry = 0x200003;
2881 int gtt_size = 0x10000;
2884 if (bufmgr_gem->aub_file) {
2885 fclose(bufmgr_gem->aub_file);
2886 bufmgr_gem->aub_file = NULL;
2891 if (geteuid() != getuid())
2894 bufmgr_gem->aub_file = fopen("intel.aub", "w+");
2895 if (!bufmgr_gem->aub_file)
2898 /* Start allocating objects from just after the GTT. */
2899 bufmgr_gem->aub_offset = gtt_size;
2901 /* Start with a (required) version packet. */
2902 aub_out(bufmgr_gem, CMD_AUB_HEADER | (13 - 2));
2904 (4 << AUB_HEADER_MAJOR_SHIFT) |
2905 (0 << AUB_HEADER_MINOR_SHIFT));
2906 for (i = 0; i < 8; i++) {
2907 aub_out(bufmgr_gem, 0); /* app name */
2909 aub_out(bufmgr_gem, 0); /* timestamp */
2910 aub_out(bufmgr_gem, 0); /* timestamp */
2911 aub_out(bufmgr_gem, 0); /* comment len */
2913 /* Set up the GTT. The max we can handle is 256M */
2914 aub_out(bufmgr_gem, CMD_AUB_TRACE_HEADER_BLOCK | (5 - 2));
2915 aub_out(bufmgr_gem, AUB_TRACE_MEMTYPE_NONLOCAL | 0 | AUB_TRACE_OP_DATA_WRITE);
2916 aub_out(bufmgr_gem, 0); /* subtype */
2917 aub_out(bufmgr_gem, 0); /* offset */
2918 aub_out(bufmgr_gem, gtt_size); /* size */
2919 for (i = 0x000; i < gtt_size; i += 4, entry += 0x1000) {
2920 aub_out(bufmgr_gem, entry);
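/*
 * Illustrative sketch (an addition, not part of the original file): AUB
 * dumping is toggled once on the buffer manager; every subsequent exec is
 * then mirrored into the "intel.aub" file opened above.
 *
 *     drm_intel_bufmgr_gem_set_aub_dump(bufmgr, 1);
 *     ...render as usual; each exec is also written to intel.aub...
 *     drm_intel_bufmgr_gem_set_aub_dump(bufmgr, 0);
 */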
2925 drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
2927 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2928 struct drm_i915_gem_context_create create;
2929 drm_intel_context *context = NULL;
2933 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
2935 DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
2940 context = calloc(1, sizeof(*context));
2941 context->ctx_id = create.ctx_id;
2942 context->bufmgr = bufmgr;
2948 drm_intel_gem_context_destroy(drm_intel_context *ctx)
2950 drm_intel_bufmgr_gem *bufmgr_gem;
2951 struct drm_i915_gem_context_destroy destroy;
2959 bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
2960 destroy.ctx_id = ctx->ctx_id;
2961 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY,
2964 fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
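/*
 * Illustrative sketch (an addition, not part of the original file): a
 * hardware context is created once, used for context execs, and destroyed
 * when no longer needed.
 *
 *     drm_intel_context *ctx = drm_intel_gem_context_create(bufmgr);
 *     if (ctx) {
 *             ...submit batches with drm_intel_gem_bo_context_exec()...
 *             drm_intel_gem_context_destroy(ctx);
 *     }
 */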
2971 drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
2975 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2976 struct drm_i915_reg_read reg_read;
2980 reg_read.offset = offset;
2982 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_REG_READ, &reg_read);
2984 *result = reg_read.val;
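/*
 * Illustrative sketch (an addition, not part of the original file): read a
 * register the kernel whitelists.  The 0x2358 offset is assumed here to be
 * the render ring TIMESTAMP register; non-whitelisted offsets are rejected
 * by the kernel.
 *
 *     uint64_t timestamp;
 *     if (drm_intel_reg_read(bufmgr, 0x2358, &timestamp) == 0)
 *             ...use the timestamp value...
 */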
2990 * Annotate the given bo for use in aub dumping.
2992 * \param annotations is an array of drm_intel_aub_annotation objects
2993 * describing the type of data in various sections of the bo. Each
2994 * element of the array specifies the type and subtype of a section of
2995 * the bo, and the past-the-end offset of that section. The elements
2996 * of \c annotations must be sorted so that ending_offset is
2999 * \param count is the number of elements in the \c annotations array.
3000 * If \c count is zero, then \c annotations will not be dereferenced.
3002 * Annotations are copied into a private data structure, so caller may
3003 * re-use the memory pointed to by \c annotations after the call
3006 * Annotations are stored for the lifetime of the bo; to reset to the
3007 * default state (no annotations), call this function with a \c count
3011 drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
3012 drm_intel_aub_annotation *annotations,
3015 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3016 unsigned size = sizeof(*annotations) * count;
3017 drm_intel_aub_annotation *new_annotations =
3018 count > 0 ? realloc(bo_gem->aub_annotations, size) : NULL;
3019 if (new_annotations == NULL) {
3020 free(bo_gem->aub_annotations);
3021 bo_gem->aub_annotations = NULL;
3022 bo_gem->aub_annotation_count = 0;
3025 memcpy(new_annotations, annotations, size);
3026 bo_gem->aub_annotations = new_annotations;
3027 bo_gem->aub_annotation_count = count;
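/*
 * Illustrative sketch (an addition, not part of the original file): mark
 * the first "used" bytes of a batch bo as command data and the remainder as
 * untyped, matching the default annotation applied to un-annotated batch
 * buffers earlier in this file.
 *
 *     drm_intel_aub_annotation notes[2] = {
 *             { AUB_TRACE_TYPE_BATCH, 0, used },
 *             { AUB_TRACE_TYPE_NOTYPE, 0, bo->size },
 *     };
 *     drm_intel_bufmgr_gem_set_aub_annotations(bo, notes, 2);
 */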
3031 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
3032 * and manage buffer objects.
3034 * \param fd File descriptor of the opened DRM device.
3037 drm_intel_bufmgr_gem_init(int fd, int batch_size)
3039 drm_intel_bufmgr_gem *bufmgr_gem;
3040 struct drm_i915_gem_get_aperture aperture;
3041 drm_i915_getparam_t gp;
3045 bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
3046 if (bufmgr_gem == NULL)
3049 bufmgr_gem->fd = fd;
3051 if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
3056 ret = drmIoctl(bufmgr_gem->fd,
3057 DRM_IOCTL_I915_GEM_GET_APERTURE,
3061 bufmgr_gem->gtt_size = aperture.aper_available_size;
3063 fprintf(stderr, "DRM_IOCTL_I915_GEM_GET_APERTURE failed: %s\n",
3065 bufmgr_gem->gtt_size = 128 * 1024 * 1024;
3066 fprintf(stderr, "Assuming %dkB available aperture size.\n"
3067 "May lead to reduced performance or incorrect "
3069 (int)bufmgr_gem->gtt_size / 1024);
3072 bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);
3074 if (IS_GEN2(bufmgr_gem->pci_device))
3075 bufmgr_gem->gen = 2;
3076 else if (IS_GEN3(bufmgr_gem->pci_device))
3077 bufmgr_gem->gen = 3;
3078 else if (IS_GEN4(bufmgr_gem->pci_device))
3079 bufmgr_gem->gen = 4;
3080 else if (IS_GEN5(bufmgr_gem->pci_device))
3081 bufmgr_gem->gen = 5;
3082 else if (IS_GEN6(bufmgr_gem->pci_device))
3083 bufmgr_gem->gen = 6;
3084 else if (IS_GEN7(bufmgr_gem->pci_device))
3085 bufmgr_gem->gen = 7;
3091 if (IS_GEN3(bufmgr_gem->pci_device) &&
3092 bufmgr_gem->gtt_size > 256*1024*1024) {
3093 /* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
3094 * be used for tiled blits. To simplify the accounting, just
3095 * subtract the unmappable part (fixed to 256MB on all known
3096 * gen3 devices) if the kernel advertises it. */
3097 bufmgr_gem->gtt_size -= 256*1024*1024;
3103 gp.param = I915_PARAM_HAS_EXECBUF2;
3104 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3108 gp.param = I915_PARAM_HAS_BSD;
3109 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3110 bufmgr_gem->has_bsd = ret == 0;
3112 gp.param = I915_PARAM_HAS_BLT;
3113 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3114 bufmgr_gem->has_blt = ret == 0;
3116 gp.param = I915_PARAM_HAS_RELAXED_FENCING;
3117 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3118 bufmgr_gem->has_relaxed_fencing = ret == 0;
3120 gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
3121 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3122 bufmgr_gem->has_wait_timeout = ret == 0;
3124 gp.param = I915_PARAM_HAS_LLC;
3125 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3127 /* Kernel does not support the HAS_LLC query; fall back to GPU
3128 * generation detection and assume that we have LLC on GEN6/7
3130 bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) |
3131 IS_GEN7(bufmgr_gem->pci_device));
3133 bufmgr_gem->has_llc = *gp.value;
3135 gp.param = I915_PARAM_HAS_VEBOX;
3136 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3137 bufmgr_gem->has_vebox = (ret == 0) && (*gp.value > 0);
3139 if (bufmgr_gem->gen < 4) {
3140 gp.param = I915_PARAM_NUM_FENCES_AVAIL;
3141 gp.value = &bufmgr_gem->available_fences;
3142 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3144 fprintf(stderr, "get fences failed: %d [%d]\n", ret,
3146 fprintf(stderr, "param: %d, val: %d\n", gp.param,
3148 bufmgr_gem->available_fences = 0;
3150 /* XXX The kernel reports the total number of fences,
3151 * including any that may be pinned.
3153 * We presume that there will be at least one pinned
3154 * fence for the scanout buffer, but there may be more
3155 * than one scanout and the user may be manually
3156 * pinning buffers. Let's move to execbuffer2 and
3157 * thereby forget the insanity of using fences...
3159 bufmgr_gem->available_fences -= 2;
3160 if (bufmgr_gem->available_fences < 0)
3161 bufmgr_gem->available_fences = 0;
3165 /* Let's go with one relocation per every 2 dwords (but round down a bit
3166 * since a power of two will mean an extra page allocation for the reloc
3169 * Every 4 was too few for the blender benchmark.
3171 bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
3173 bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
3174 bufmgr_gem->bufmgr.bo_alloc_for_render =
3175 drm_intel_gem_bo_alloc_for_render;
3176 bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
3177 bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
3178 bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
3179 bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
3180 bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
3181 bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
3182 bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
3183 bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
3184 bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
3185 bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
3186 bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
3187 bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
3188 bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
3189 bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
3190 bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
3191 /* Use the new one if available */
3193 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
3194 bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
3196 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
3197 bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
3198 bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
3199 bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
3200 bufmgr_gem->bufmgr.debug = 0;
3201 bufmgr_gem->bufmgr.check_aperture_space =
3202 drm_intel_gem_check_aperture_space;
3203 bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
3204 bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
3205 bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
3206 drm_intel_gem_get_pipe_from_crtc_id;
3207 bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
3209 DRMINITLISTHEAD(&bufmgr_gem->named);
3210 init_cache_buckets(bufmgr_gem);
3212 DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
3213 bufmgr_gem->vma_max = -1; /* unlimited by default */
3215 return &bufmgr_gem->bufmgr;
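/*
 * Illustrative sketch (an addition, not part of the original file): typical
 * initialization from a client that has already opened a DRM device fd;
 * the 4096-byte batch size is just an example value.
 *
 *     drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
 *     if (bufmgr != NULL) {
 *             drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 *             drm_intel_bufmgr_gem_enable_fenced_relocs(bufmgr);
 *     }
 */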