1 /**************************************************************************
3 * Copyright © 2007 Red Hat Inc.
4 * Copyright © 2007-2012 Intel Corporation
5 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions
29 **************************************************************************/
31 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
32 * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
33 * Eric Anholt <eric@anholt.net>
34 * Dave Airlie <airlied@linux.ie>
42 #include <xf86atomic.h>
50 #include <sys/ioctl.h>
53 #include <sys/types.h>
58 #define ETIME ETIMEDOUT
61 #include "libdrm_lists.h"
62 #include "intel_bufmgr.h"
63 #include "intel_bufmgr_priv.h"
64 #include "intel_chipset.h"
65 #include "intel_aub.h"
78 #define VG_CLEAR(s) VG(memset(&s, 0, sizeof(s)))
80 #define DBG(...) do { \
81 if (bufmgr_gem->bufmgr.debug) \
82 fprintf(stderr, __VA_ARGS__); \
85 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
87 typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
89 struct drm_intel_gem_bo_bucket {
94 typedef struct _drm_intel_bufmgr_gem {
95 drm_intel_bufmgr bufmgr;
103 pthread_mutex_t lock;
105 struct drm_i915_gem_exec_object *exec_objects;
106 struct drm_i915_gem_exec_object2 *exec2_objects;
107 drm_intel_bo **exec_bos;
111 /** Array of lists of cached gem objects of power-of-two sizes */
112 struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
116 drmMMListHead managers;
119 drmMMListHead vma_cache;
120 int vma_count, vma_open, vma_max;
123 int available_fences;
126 unsigned int has_bsd : 1;
127 unsigned int has_blt : 1;
128 unsigned int has_relaxed_fencing : 1;
129 unsigned int has_llc : 1;
130 unsigned int has_wait_timeout : 1;
131 unsigned int bo_reuse : 1;
132 unsigned int no_exec : 1;
133 unsigned int has_vebox : 1;
139 } drm_intel_bufmgr_gem;
141 #define DRM_INTEL_RELOC_FENCE (1<<0)
143 typedef struct _drm_intel_reloc_target_info {
146 } drm_intel_reloc_target;
148 struct _drm_intel_bo_gem {
156	 * Kernel-assigned global name for this object
158	 * List contains both flink-named and prime-fd'd objects
160 unsigned int global_name;
161 drmMMListHead name_list;
164 * Index of the buffer within the validation list while preparing a
165 * batchbuffer execution.
170 * Current tiling mode
172 uint32_t tiling_mode;
173 uint32_t swizzle_mode;
174 unsigned long stride;
178 /** Array passed to the DRM containing relocation information. */
179 struct drm_i915_gem_relocation_entry *relocs;
181 * Array of info structs corresponding to relocs[i].target_handle etc
183 drm_intel_reloc_target *reloc_target_info;
184 /** Number of entries in relocs */
186 /** Mapped address for the buffer, saved across map/unmap cycles */
188 /** GTT virtual address for the buffer, saved across map/unmap cycles */
191 drmMMListHead vma_list;
197 * Boolean of whether this BO and its children have been included in
198 * the current drm_intel_bufmgr_check_aperture_space() total.
200 bool included_in_check_aperture;
203 * Boolean of whether this buffer has been used as a relocation
204 * target and had its size accounted for, and thus can't have any
205 * further relocations added to it.
207 bool used_as_reloc_target;
210 * Boolean of whether we have encountered an error whilst building the relocation tree.
215 * Boolean of whether this buffer can be re-used
220 * Boolean of whether the GPU is definitely not accessing the buffer.
222 * This is only valid when reusable, since non-reusable
223	 * buffers are those that have been shared with other
224 * processes, so we don't know their state.
229	 * Size in bytes of this buffer and its relocation descendants.
231 * Used to avoid costly tree walking in
232 * drm_intel_bufmgr_check_aperture in the common case.
237 * Number of potential fence registers required by this buffer and its
240 int reloc_tree_fences;
242	/** Whether we may need to do the SW_FINISH ioctl on unmap. */
243 bool mapped_cpu_write;
247 drm_intel_aub_annotation *aub_annotations;
248 unsigned aub_annotation_count;
252 drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);
255 drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);
258 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
259 uint32_t * swizzle_mode);
262 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
263 uint32_t tiling_mode,
266 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
269 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);
271 static void drm_intel_gem_bo_free(drm_intel_bo *bo);
274 drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
275 uint32_t *tiling_mode)
277 unsigned long min_size, max_size;
280 if (*tiling_mode == I915_TILING_NONE)
283	/* 965+ just needs multiples of the page size for tiling */
284 if (bufmgr_gem->gen >= 4)
285 return ROUND_UP_TO(size, 4096);
287	/* Older chips need powers of two, of at least 512KB or 1MB */
288 if (bufmgr_gem->gen == 3) {
289 min_size = 1024*1024;
290 max_size = 128*1024*1024;
293 max_size = 64*1024*1024;
296 if (size > max_size) {
297 *tiling_mode = I915_TILING_NONE;
301 /* Do we need to allocate every page for the fence? */
302 if (bufmgr_gem->has_relaxed_fencing)
303 return ROUND_UP_TO(size, 4096);
305 for (i = min_size; i < size; i <<= 1)
312 * Round a given pitch up to the minimum required for X tiling on a
313 * given chip. We use 512 as the minimum to allow for a later tiling
317 drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
318 unsigned long pitch, uint32_t *tiling_mode)
320 unsigned long tile_width;
323 /* If untiled, then just align it so that we can do rendering
324 * to it with the 3D engine.
326 if (*tiling_mode == I915_TILING_NONE)
327 return ALIGN(pitch, 64);
329 if (*tiling_mode == I915_TILING_X
330 || (IS_915(bufmgr_gem->pci_device)
331 && *tiling_mode == I915_TILING_Y))
336 /* 965 is flexible */
337 if (bufmgr_gem->gen >= 4)
338 return ROUND_UP_TO(pitch, tile_width);
340 /* The older hardware has a maximum pitch of 8192 with tiled
341	 * surfaces, so fall back to untiled if it's too large.
344 *tiling_mode = I915_TILING_NONE;
345 return ALIGN(pitch, 64);
348 /* Pre-965 needs power of two tile width */
349 for (i = tile_width; i < pitch; i <<= 1)
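
/* Illustrative sketch (ours, not part of the original file): how the two
 * helpers above combine. For a hypothetical 1920x1080, 32bpp X-tiled
 * surface on pre-965 hardware, the 7680-byte pitch rounds up to the next
 * power of two (8192) before the total size itself is rounded.
 */
#if 0
static unsigned long
example_tiled_alloc_size(drm_intel_bufmgr_gem *bufmgr_gem)
{
	uint32_t tiling = I915_TILING_X;
	unsigned long pitch;

	pitch = drm_intel_gem_bo_tile_pitch(bufmgr_gem, 1920 * 4, &tiling);
	return drm_intel_gem_bo_tile_size(bufmgr_gem, pitch * 1080, &tiling);
}
#endif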
355 static struct drm_intel_gem_bo_bucket *
356 drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
361 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
362 struct drm_intel_gem_bo_bucket *bucket =
363 &bufmgr_gem->cache_bucket[i];
364 if (bucket->size >= size) {
373 drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
377 for (i = 0; i < bufmgr_gem->exec_count; i++) {
378 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
379 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
381 if (bo_gem->relocs == NULL) {
382 DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
387 for (j = 0; j < bo_gem->reloc_count; j++) {
388 drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
389 drm_intel_bo_gem *target_gem =
390 (drm_intel_bo_gem *) target_bo;
392 DBG("%2d: %d (%s)@0x%08llx -> "
393 "%d (%s)@0x%08lx + 0x%08x\n",
395 bo_gem->gem_handle, bo_gem->name,
396 (unsigned long long)bo_gem->relocs[j].offset,
397 target_gem->gem_handle,
400 bo_gem->relocs[j].delta);
406 drm_intel_gem_bo_reference(drm_intel_bo *bo)
408 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
410 atomic_inc(&bo_gem->refcount);
414 * Adds the given buffer to the list of buffers to be validated (moved into the
415 * appropriate memory type) with the next batch submission.
417 * If a buffer is validated multiple times in a batch submission, it ends up
418 * with the intersection of the memory type flags and the union of the
422 drm_intel_add_validate_buffer(drm_intel_bo *bo)
424 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
425 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
428 if (bo_gem->validate_index != -1)
431 /* Extend the array of validation entries as necessary. */
432 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
433 int new_size = bufmgr_gem->exec_size * 2;
438 bufmgr_gem->exec_objects =
439 realloc(bufmgr_gem->exec_objects,
440 sizeof(*bufmgr_gem->exec_objects) * new_size);
441 bufmgr_gem->exec_bos =
442 realloc(bufmgr_gem->exec_bos,
443 sizeof(*bufmgr_gem->exec_bos) * new_size);
444 bufmgr_gem->exec_size = new_size;
447 index = bufmgr_gem->exec_count;
448 bo_gem->validate_index = index;
449 /* Fill in array entry */
450 bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
451 bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
452 bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
453 bufmgr_gem->exec_objects[index].alignment = 0;
454 bufmgr_gem->exec_objects[index].offset = 0;
455 bufmgr_gem->exec_bos[index] = bo;
456 bufmgr_gem->exec_count++;
460 drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
462 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
463 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
466 if (bo_gem->validate_index != -1) {
468 bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |=
469 EXEC_OBJECT_NEEDS_FENCE;
473 /* Extend the array of validation entries as necessary. */
474 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
475 int new_size = bufmgr_gem->exec_size * 2;
480 bufmgr_gem->exec2_objects =
481 realloc(bufmgr_gem->exec2_objects,
482 sizeof(*bufmgr_gem->exec2_objects) * new_size);
483 bufmgr_gem->exec_bos =
484 realloc(bufmgr_gem->exec_bos,
485 sizeof(*bufmgr_gem->exec_bos) * new_size);
486 bufmgr_gem->exec_size = new_size;
489 index = bufmgr_gem->exec_count;
490 bo_gem->validate_index = index;
491 /* Fill in array entry */
492 bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
493 bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
494 bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
495 bufmgr_gem->exec2_objects[index].alignment = 0;
496 bufmgr_gem->exec2_objects[index].offset = 0;
497 bufmgr_gem->exec_bos[index] = bo;
498 bufmgr_gem->exec2_objects[index].flags = 0;
499 bufmgr_gem->exec2_objects[index].rsvd1 = 0;
500 bufmgr_gem->exec2_objects[index].rsvd2 = 0;
502 bufmgr_gem->exec2_objects[index].flags |=
503 EXEC_OBJECT_NEEDS_FENCE;
505 bufmgr_gem->exec_count++;
508 #define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
512 drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
513 drm_intel_bo_gem *bo_gem)
517 assert(!bo_gem->used_as_reloc_target);
519	/* The older chipsets are far less flexible in terms of tiling,
520	 * and require tiled buffers to be size-aligned in the aperture.
521 * This means that in the worst possible case we will need a hole
522 * twice as large as the object in order for it to fit into the
523 * aperture. Optimal packing is for wimps.
525 size = bo_gem->bo.size;
526 if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
529 if (bufmgr_gem->has_relaxed_fencing) {
530 if (bufmgr_gem->gen == 3)
531 min_size = 1024*1024;
535 while (min_size < size)
540 /* Account for worst-case alignment. */
544 bo_gem->reloc_tree_size = size;
548 drm_intel_setup_reloc_list(drm_intel_bo *bo)
550 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
551 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
552 unsigned int max_relocs = bufmgr_gem->max_relocs;
554 if (bo->size / 4 < max_relocs)
555 max_relocs = bo->size / 4;
557 bo_gem->relocs = malloc(max_relocs *
558 sizeof(struct drm_i915_gem_relocation_entry));
559 bo_gem->reloc_target_info = malloc(max_relocs *
560 sizeof(drm_intel_reloc_target));
561 if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
562 bo_gem->has_error = true;
564 free (bo_gem->relocs);
565 bo_gem->relocs = NULL;
567 free (bo_gem->reloc_target_info);
568 bo_gem->reloc_target_info = NULL;
577 drm_intel_gem_bo_busy(drm_intel_bo *bo)
579 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
580 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
581 struct drm_i915_gem_busy busy;
584 if (bo_gem->reusable && bo_gem->idle)
588 busy.handle = bo_gem->gem_handle;
590 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
592 bo_gem->idle = !busy.busy;
597 return (ret == 0 && busy.busy);
601 drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
602 drm_intel_bo_gem *bo_gem, int state)
604 struct drm_i915_gem_madvise madv;
607 madv.handle = bo_gem->gem_handle;
610 drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
612 return madv.retained;
616 drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
618 return drm_intel_gem_bo_madvise_internal
619 ((drm_intel_bufmgr_gem *) bo->bufmgr,
620 (drm_intel_bo_gem *) bo,
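
/* Usage sketch (ours): a cache can mark an idle buffer purgeable and later
 * try to reclaim it. A zero return from the WILLNEED call means the kernel
 * purged the pages and the contents must be regenerated.
 */
#if 0
static int
example_purgeable_cycle(drm_intel_bo *bo)
{
	drm_intel_bo_madvise(bo, I915_MADV_DONTNEED);
	/* ... some time later ... */
	if (!drm_intel_bo_madvise(bo, I915_MADV_WILLNEED))
		return -1;	/* purged; regenerate contents */
	return 0;
}
#endif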
624	/* Drop the oldest entries that have been purged by the kernel */
626 drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
627 struct drm_intel_gem_bo_bucket *bucket)
629 while (!DRMLISTEMPTY(&bucket->head)) {
630 drm_intel_bo_gem *bo_gem;
632 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
633 bucket->head.next, head);
634 if (drm_intel_gem_bo_madvise_internal
635 (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
638 DRMLISTDEL(&bo_gem->head);
639 drm_intel_gem_bo_free(&bo_gem->bo);
643 static drm_intel_bo *
644 drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
648 uint32_t tiling_mode,
649 unsigned long stride)
651 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
652 drm_intel_bo_gem *bo_gem;
653 unsigned int page_size = getpagesize();
655 struct drm_intel_gem_bo_bucket *bucket;
656 bool alloc_from_cache;
657 unsigned long bo_size;
658 bool for_render = false;
660 if (flags & BO_ALLOC_FOR_RENDER)
663 /* Round the allocated size up to a power of two number of pages. */
664 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
666 /* If we don't have caching at this size, don't actually round the
669 if (bucket == NULL) {
671 if (bo_size < page_size)
674 bo_size = bucket->size;
677 pthread_mutex_lock(&bufmgr_gem->lock);
678 /* Get a buffer out of the cache if available */
680 alloc_from_cache = false;
681 if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
683 /* Allocate new render-target BOs from the tail (MRU)
684 * of the list, as it will likely be hot in the GPU
685 * cache and in the aperture for us.
687 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
688 bucket->head.prev, head);
689 DRMLISTDEL(&bo_gem->head);
690 alloc_from_cache = true;
692 /* For non-render-target BOs (where we're probably
693 * going to map it first thing in order to fill it
694 * with data), check if the last BO in the cache is
695 * unbusy, and only reuse in that case. Otherwise,
696 * allocating a new buffer is probably faster than
697 * waiting for the GPU to finish.
699 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
700 bucket->head.next, head);
701 if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
702 alloc_from_cache = true;
703 DRMLISTDEL(&bo_gem->head);
707 if (alloc_from_cache) {
708 if (!drm_intel_gem_bo_madvise_internal
709 (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
710 drm_intel_gem_bo_free(&bo_gem->bo);
711 drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
716 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
719 drm_intel_gem_bo_free(&bo_gem->bo);
724 pthread_mutex_unlock(&bufmgr_gem->lock);
726 if (!alloc_from_cache) {
727 struct drm_i915_gem_create create;
729 bo_gem = calloc(1, sizeof(*bo_gem));
733 bo_gem->bo.size = bo_size;
736 create.size = bo_size;
738 ret = drmIoctl(bufmgr_gem->fd,
739 DRM_IOCTL_I915_GEM_CREATE,
741 bo_gem->gem_handle = create.handle;
742 bo_gem->bo.handle = bo_gem->gem_handle;
747 bo_gem->bo.bufmgr = bufmgr;
749 bo_gem->tiling_mode = I915_TILING_NONE;
750 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
753 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
756 drm_intel_gem_bo_free(&bo_gem->bo);
760 DRMINITLISTHEAD(&bo_gem->name_list);
761 DRMINITLISTHEAD(&bo_gem->vma_list);
765 atomic_set(&bo_gem->refcount, 1);
766 bo_gem->validate_index = -1;
767 bo_gem->reloc_tree_fences = 0;
768 bo_gem->used_as_reloc_target = false;
769 bo_gem->has_error = false;
770 bo_gem->reusable = true;
771 bo_gem->aub_annotations = NULL;
772 bo_gem->aub_annotation_count = 0;
774 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
776 DBG("bo_create: buf %d (%s) %ldb\n",
777 bo_gem->gem_handle, bo_gem->name, size);
782 static drm_intel_bo *
783 drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
786 unsigned int alignment)
788 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
790 I915_TILING_NONE, 0);
793 static drm_intel_bo *
794 drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
797 unsigned int alignment)
799 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
800 I915_TILING_NONE, 0);
803 static drm_intel_bo *
804 drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
805 int x, int y, int cpp, uint32_t *tiling_mode,
806 unsigned long *pitch, unsigned long flags)
808 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
809 unsigned long size, stride;
813 unsigned long aligned_y, height_alignment;
815 tiling = *tiling_mode;
817 /* If we're tiled, our allocations are in 8 or 32-row blocks,
818 * so failure to align our height means that we won't allocate
821 * If we're untiled, we still have to align to 2 rows high
822 * because the data port accesses 2x2 blocks even if the
823 * bottom row isn't to be rendered, so failure to align means
824 * we could walk off the end of the GTT and fault. This is
825 * documented on 965, and may be the case on older chipsets
826 * too so we try to be careful.
829 height_alignment = 2;
831 if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
832 height_alignment = 16;
833 else if (tiling == I915_TILING_X
834 || (IS_915(bufmgr_gem->pci_device)
835 && tiling == I915_TILING_Y))
836 height_alignment = 8;
837 else if (tiling == I915_TILING_Y)
838 height_alignment = 32;
839 aligned_y = ALIGN(y, height_alignment);
842 stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
843 size = stride * aligned_y;
844 size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
845 } while (*tiling_mode != tiling);
848 if (tiling == I915_TILING_NONE)
851 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
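
/* Usage sketch (ours): allocating an X-tiled render target through the
 * function above. The implementation may demote the tiling mode, so the
 * caller must read back *tiling_mode and *pitch instead of assuming the
 * requested values.
 */
#if 0
static drm_intel_bo *
example_alloc_tiled(drm_intel_bufmgr *bufmgr)
{
	uint32_t tiling = I915_TILING_X;
	unsigned long pitch;

	return drm_intel_bo_alloc_tiled(bufmgr, "render target",
					1920, 1080, 4, &tiling, &pitch,
					BO_ALLOC_FOR_RENDER);
}
#endif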
856 * Returns a drm_intel_bo wrapping the given buffer object handle.
858 * This can be used when one application needs to pass a buffer object
861 drm_public drm_intel_bo *
862 drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
866 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
867 drm_intel_bo_gem *bo_gem;
869 struct drm_gem_open open_arg;
870 struct drm_i915_gem_get_tiling get_tiling;
873	/* At the moment most applications only have a few named bos.
874 * For instance, in a DRI client only the render buffers passed
875 * between X and the client are named. And since X returns the
876 * alternating names for the front/back buffer a linear search
877 * provides a sufficiently fast match.
879 for (list = bufmgr_gem->named.next;
880 list != &bufmgr_gem->named;
882 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
883 if (bo_gem->global_name == handle) {
884 drm_intel_gem_bo_reference(&bo_gem->bo);
890 open_arg.name = handle;
891 ret = drmIoctl(bufmgr_gem->fd,
895 DBG("Couldn't reference %s handle 0x%08x: %s\n",
896 name, handle, strerror(errno));
899 /* Now see if someone has used a prime handle to get this
900 * object from the kernel before by looking through the list
901 * again for a matching gem_handle
903 for (list = bufmgr_gem->named.next;
904 list != &bufmgr_gem->named;
906 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
907 if (bo_gem->gem_handle == open_arg.handle) {
908 drm_intel_gem_bo_reference(&bo_gem->bo);
913 bo_gem = calloc(1, sizeof(*bo_gem));
917 bo_gem->bo.size = open_arg.size;
918 bo_gem->bo.offset = 0;
919 bo_gem->bo.offset64 = 0;
920 bo_gem->bo.virtual = NULL;
921 bo_gem->bo.bufmgr = bufmgr;
923 atomic_set(&bo_gem->refcount, 1);
924 bo_gem->validate_index = -1;
925 bo_gem->gem_handle = open_arg.handle;
926 bo_gem->bo.handle = open_arg.handle;
927 bo_gem->global_name = handle;
928 bo_gem->reusable = false;
930 VG_CLEAR(get_tiling);
931 get_tiling.handle = bo_gem->gem_handle;
932 ret = drmIoctl(bufmgr_gem->fd,
933 DRM_IOCTL_I915_GEM_GET_TILING,
936 drm_intel_gem_bo_unreference(&bo_gem->bo);
939 bo_gem->tiling_mode = get_tiling.tiling_mode;
940 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
941 /* XXX stride is unknown */
942 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
944 DRMINITLISTHEAD(&bo_gem->vma_list);
945 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
946 DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
952 drm_intel_gem_bo_free(drm_intel_bo *bo)
954 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
955 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
956 struct drm_gem_close close;
959 DRMLISTDEL(&bo_gem->vma_list);
960 if (bo_gem->mem_virtual) {
961 VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
962 munmap(bo_gem->mem_virtual, bo_gem->bo.size);
963 bufmgr_gem->vma_count--;
965 if (bo_gem->gtt_virtual) {
966 munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
967 bufmgr_gem->vma_count--;
970 /* Close this object */
972 close.handle = bo_gem->gem_handle;
973 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
975 DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
976 bo_gem->gem_handle, bo_gem->name, strerror(errno));
978 free(bo_gem->aub_annotations);
983 drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
986 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
988 if (bo_gem->mem_virtual)
989 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);
991 if (bo_gem->gtt_virtual)
992 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
996 /** Frees all cached buffers significantly older than @time. */
998 drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
1002 if (bufmgr_gem->time == time)
1005 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
1006 struct drm_intel_gem_bo_bucket *bucket =
1007 &bufmgr_gem->cache_bucket[i];
1009 while (!DRMLISTEMPTY(&bucket->head)) {
1010 drm_intel_bo_gem *bo_gem;
1012 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1013 bucket->head.next, head);
1014 if (time - bo_gem->free_time <= 1)
1017 DRMLISTDEL(&bo_gem->head);
1019 drm_intel_gem_bo_free(&bo_gem->bo);
1023 bufmgr_gem->time = time;
1026 static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
1030 DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
1031 bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);
1033 if (bufmgr_gem->vma_max < 0)
1036 /* We may need to evict a few entries in order to create new mmaps */
1037 limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
1041 while (bufmgr_gem->vma_count > limit) {
1042 drm_intel_bo_gem *bo_gem;
1044 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1045 bufmgr_gem->vma_cache.next,
1047 assert(bo_gem->map_count == 0);
1048 DRMLISTDELINIT(&bo_gem->vma_list);
1050 if (bo_gem->mem_virtual) {
1051 munmap(bo_gem->mem_virtual, bo_gem->bo.size);
1052 bo_gem->mem_virtual = NULL;
1053 bufmgr_gem->vma_count--;
1055 if (bo_gem->gtt_virtual) {
1056 munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
1057 bo_gem->gtt_virtual = NULL;
1058 bufmgr_gem->vma_count--;
1063 static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1064 drm_intel_bo_gem *bo_gem)
1066 bufmgr_gem->vma_open--;
1067 DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
1068 if (bo_gem->mem_virtual)
1069 bufmgr_gem->vma_count++;
1070 if (bo_gem->gtt_virtual)
1071 bufmgr_gem->vma_count++;
1072 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
1075 static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1076 drm_intel_bo_gem *bo_gem)
1078 bufmgr_gem->vma_open++;
1079 DRMLISTDEL(&bo_gem->vma_list);
1080 if (bo_gem->mem_virtual)
1081 bufmgr_gem->vma_count--;
1082 if (bo_gem->gtt_virtual)
1083 bufmgr_gem->vma_count--;
1084 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
1088 drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
1090 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1091 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1092 struct drm_intel_gem_bo_bucket *bucket;
1095 /* Unreference all the target buffers */
1096 for (i = 0; i < bo_gem->reloc_count; i++) {
1097 if (bo_gem->reloc_target_info[i].bo != bo) {
1098 drm_intel_gem_bo_unreference_locked_timed(bo_gem->
1099 reloc_target_info[i].bo,
1103 bo_gem->reloc_count = 0;
1104 bo_gem->used_as_reloc_target = false;
1106 DBG("bo_unreference final: %d (%s)\n",
1107 bo_gem->gem_handle, bo_gem->name);
1109 /* release memory associated with this object */
1110 if (bo_gem->reloc_target_info) {
1111 free(bo_gem->reloc_target_info);
1112 bo_gem->reloc_target_info = NULL;
1114 if (bo_gem->relocs) {
1115 free(bo_gem->relocs);
1116 bo_gem->relocs = NULL;
1119 /* Clear any left-over mappings */
1120 if (bo_gem->map_count) {
1121 DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
1122 bo_gem->map_count = 0;
1123 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1124 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1127 DRMLISTDEL(&bo_gem->name_list);
1129 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
1130 /* Put the buffer into our internal cache for reuse if we can. */
1131 if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
1132 drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
1133 I915_MADV_DONTNEED)) {
1134 bo_gem->free_time = time;
1136 bo_gem->name = NULL;
1137 bo_gem->validate_index = -1;
1139 DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
1141 drm_intel_gem_bo_free(bo);
1145 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
1148 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1150 assert(atomic_read(&bo_gem->refcount) > 0);
1151 if (atomic_dec_and_test(&bo_gem->refcount))
1152 drm_intel_gem_bo_unreference_final(bo, time);
1155 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
1157 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1159 assert(atomic_read(&bo_gem->refcount) > 0);
1160 if (atomic_dec_and_test(&bo_gem->refcount)) {
1161 drm_intel_bufmgr_gem *bufmgr_gem =
1162 (drm_intel_bufmgr_gem *) bo->bufmgr;
1163 struct timespec time;
1165 clock_gettime(CLOCK_MONOTONIC, &time);
1167 pthread_mutex_lock(&bufmgr_gem->lock);
1168 drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
1169 drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
1170 pthread_mutex_unlock(&bufmgr_gem->lock);
1174 static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
1176 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1177 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1178 struct drm_i915_gem_set_domain set_domain;
1181 pthread_mutex_lock(&bufmgr_gem->lock);
1183 if (bo_gem->map_count++ == 0)
1184 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
1186 if (!bo_gem->mem_virtual) {
1187 struct drm_i915_gem_mmap mmap_arg;
1189 DBG("bo_map: %d (%s), map_count=%d\n",
1190 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1193 mmap_arg.handle = bo_gem->gem_handle;
1194 mmap_arg.offset = 0;
1195 mmap_arg.size = bo->size;
1196 ret = drmIoctl(bufmgr_gem->fd,
1197 DRM_IOCTL_I915_GEM_MMAP,
1201 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1202 __FILE__, __LINE__, bo_gem->gem_handle,
1203 bo_gem->name, strerror(errno));
1204 if (--bo_gem->map_count == 0)
1205 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1206 pthread_mutex_unlock(&bufmgr_gem->lock);
1209 VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
1210 bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
1212 DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1213 bo_gem->mem_virtual);
1214 bo->virtual = bo_gem->mem_virtual;
1216 VG_CLEAR(set_domain);
1217 set_domain.handle = bo_gem->gem_handle;
1218 set_domain.read_domains = I915_GEM_DOMAIN_CPU;
1220 set_domain.write_domain = I915_GEM_DOMAIN_CPU;
1222 set_domain.write_domain = 0;
1223 ret = drmIoctl(bufmgr_gem->fd,
1224 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1227 DBG("%s:%d: Error setting to CPU domain %d: %s\n",
1228 __FILE__, __LINE__, bo_gem->gem_handle,
1233 bo_gem->mapped_cpu_write = true;
1235 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1236 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
1237 pthread_mutex_unlock(&bufmgr_gem->lock);
1243 map_gtt(drm_intel_bo *bo)
1245 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1246 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1249 if (bo_gem->map_count++ == 0)
1250 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
1252 /* Get a mapping of the buffer if we haven't before. */
1253 if (bo_gem->gtt_virtual == NULL) {
1254 struct drm_i915_gem_mmap_gtt mmap_arg;
1256 DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
1257 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1260 mmap_arg.handle = bo_gem->gem_handle;
1262 /* Get the fake offset back... */
1263 ret = drmIoctl(bufmgr_gem->fd,
1264 DRM_IOCTL_I915_GEM_MMAP_GTT,
1268 DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
1270 bo_gem->gem_handle, bo_gem->name,
1272 if (--bo_gem->map_count == 0)
1273 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1278 bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
1279 MAP_SHARED, bufmgr_gem->fd,
1281 if (bo_gem->gtt_virtual == MAP_FAILED) {
1282 bo_gem->gtt_virtual = NULL;
1284 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1286 bo_gem->gem_handle, bo_gem->name,
1288 if (--bo_gem->map_count == 0)
1289 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1294 bo->virtual = bo_gem->gtt_virtual;
1296 DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1297 bo_gem->gtt_virtual);
1303 drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
1305 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1306 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1307 struct drm_i915_gem_set_domain set_domain;
1310 pthread_mutex_lock(&bufmgr_gem->lock);
1314 pthread_mutex_unlock(&bufmgr_gem->lock);
1318 /* Now move it to the GTT domain so that the GPU and CPU
1319 * caches are flushed and the GPU isn't actively using the
1322 * The pagefault handler does this domain change for us when
1323 * it has unbound the BO from the GTT, but it's up to us to
1324 * tell it when we're about to use things if we had done
1325 * rendering and it still happens to be bound to the GTT.
1327 VG_CLEAR(set_domain);
1328 set_domain.handle = bo_gem->gem_handle;
1329 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1330 set_domain.write_domain = I915_GEM_DOMAIN_GTT;
1331 ret = drmIoctl(bufmgr_gem->fd,
1332 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1335 DBG("%s:%d: Error setting domain %d: %s\n",
1336 __FILE__, __LINE__, bo_gem->gem_handle,
1340 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1341 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
1342 pthread_mutex_unlock(&bufmgr_gem->lock);
1348 * Performs a mapping of the buffer object like the normal GTT
1349 * mapping, but avoids waiting for the GPU to be done reading from or
1350 * rendering to the buffer.
1352 * This is used in the implementation of GL_ARB_map_buffer_range: The
1353 * user asks to create a buffer, then does a mapping, fills some
1354 * space, runs a drawing command, then asks to map it again without
1355 * synchronizing because it guarantees that it won't write over the
1356 * data that the GPU is busy using (or, more specifically, that if it
1357 * does write over the data, it acknowledges that rendering is
1362 drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
1364 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1365 #ifdef HAVE_VALGRIND
1366 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1370 /* If the CPU cache isn't coherent with the GTT, then use a
1371 * regular synchronized mapping. The problem is that we don't
1372 * track where the buffer was last used on the CPU side in
1373 * terms of drm_intel_bo_map vs drm_intel_gem_bo_map_gtt, so
1374 * we would potentially corrupt the buffer even when the user
1375 * does reasonable things.
1377 if (!bufmgr_gem->has_llc)
1378 return drm_intel_gem_bo_map_gtt(bo);
1380 pthread_mutex_lock(&bufmgr_gem->lock);
1384 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1385 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
1388 pthread_mutex_unlock(&bufmgr_gem->lock);
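
/* Usage sketch (ours): the GL_ARB_map_buffer_range pattern described above.
 * The caller guarantees it only writes ranges the GPU is not reading.
 */
#if 0
static int
example_unsynchronized_fill(drm_intel_bo *bo, unsigned long offset,
			    const void *data, unsigned long size)
{
	int ret;

	ret = drm_intel_gem_bo_map_unsynchronized(bo);
	if (ret != 0)
		return ret;
	memcpy((char *)bo->virtual + offset, data, size);
	return drm_intel_bo_unmap(bo);
}
#endif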
1393 static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
1395 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1396 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1402 pthread_mutex_lock(&bufmgr_gem->lock);
1404 if (bo_gem->map_count <= 0) {
1405 DBG("attempted to unmap an unmapped bo\n");
1406 pthread_mutex_unlock(&bufmgr_gem->lock);
1407 /* Preserve the old behaviour of just treating this as a
1408 * no-op rather than reporting the error.
1413 if (bo_gem->mapped_cpu_write) {
1414 struct drm_i915_gem_sw_finish sw_finish;
1416 /* Cause a flush to happen if the buffer's pinned for
1417 * scanout, so the results show up in a timely manner.
1418	 * Unlike GTT set-domain, this only does work if the
1419	 * buffer is scanout-related.
1421 VG_CLEAR(sw_finish);
1422 sw_finish.handle = bo_gem->gem_handle;
1423 ret = drmIoctl(bufmgr_gem->fd,
1424 DRM_IOCTL_I915_GEM_SW_FINISH,
1426 ret = ret == -1 ? -errno : 0;
1428 bo_gem->mapped_cpu_write = false;
1431	/* We need to unmap after every invocation, as we cannot track
1432	 * an open vma for every bo: that would exhaust the system
1433	 * limits and cause later failures.
1435 if (--bo_gem->map_count == 0) {
1436 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1437 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1440 pthread_mutex_unlock(&bufmgr_gem->lock);
1446 drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
1448 return drm_intel_gem_bo_unmap(bo);
1452 drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
1453 unsigned long size, const void *data)
1455 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1456 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1457 struct drm_i915_gem_pwrite pwrite;
1461 pwrite.handle = bo_gem->gem_handle;
1462 pwrite.offset = offset;
1464 pwrite.data_ptr = (uint64_t) (uintptr_t) data;
1465 ret = drmIoctl(bufmgr_gem->fd,
1466 DRM_IOCTL_I915_GEM_PWRITE,
1470 DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
1471 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1472 (int)size, strerror(errno));
1479 drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
1481 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1482 struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
1485 VG_CLEAR(get_pipe_from_crtc_id);
1486 get_pipe_from_crtc_id.crtc_id = crtc_id;
1487 ret = drmIoctl(bufmgr_gem->fd,
1488 DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
1489 &get_pipe_from_crtc_id);
1491 /* We return -1 here to signal that we don't
1492 * know which pipe is associated with this crtc.
1493 * This lets the caller know that this information
1494 * isn't available; using the wrong pipe for
1495 * vblank waiting can cause the chipset to lock up
1500 return get_pipe_from_crtc_id.pipe;
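
/* Usage sketch (ours): treat a negative result as "pipe unknown" and fall
 * back to a safe default rather than guessing and risking a lockup.
 */
#if 0
static int
example_pipe_for_vblank(drm_intel_bufmgr *bufmgr, int crtc_id)
{
	int pipe = drm_intel_get_pipe_from_crtc_id(bufmgr, crtc_id);

	return pipe < 0 ? 0 : pipe;	/* default to pipe A when unknown */
}
#endif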
1504 drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
1505 unsigned long size, void *data)
1507 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1508 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1509 struct drm_i915_gem_pread pread;
1513 pread.handle = bo_gem->gem_handle;
1514 pread.offset = offset;
1516 pread.data_ptr = (uint64_t) (uintptr_t) data;
1517 ret = drmIoctl(bufmgr_gem->fd,
1518 DRM_IOCTL_I915_GEM_PREAD,
1522 DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
1523 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1524 (int)size, strerror(errno));
1530 /** Waits for all GPU rendering with the object to have completed. */
1532 drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
1534 drm_intel_gem_bo_start_gtt_access(bo, 1);
1538 * Waits on a BO for the given amount of time.
1540 * @bo: buffer object to wait for
1541 * @timeout_ns: amount of time to wait in nanoseconds.
1542 * If value is less than 0, an infinite wait will occur.
1544	 * Returns 0 if the wait was successful, i.e. the last batch referencing the
1545	 * object has completed within the allotted time. Otherwise a negative return
1546	 * value describes the error, of particular interest being -ETIME, which
1547	 * indicates the wait expired before the object became idle.
1549 * Similar to drm_intel_gem_bo_wait_rendering except a timeout parameter allows
1550 * the operation to give up after a certain amount of time. Another subtle
1551 * difference is the internal locking semantics are different (this variant does
1552 * not hold the lock for the duration of the wait). This makes the wait subject
1553 * to a larger userspace race window.
1555 * The implementation shall wait until the object is no longer actively
1556	 * referenced within a batch buffer at the time of the call. The wait does
1557	 * not guard against the buffer being re-issued via another thread or a flinked
1558	 * handle. Userspace must make sure this race does not occur if such precision
1562 drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
1564 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1565 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1566 struct drm_i915_gem_wait wait;
1569 if (!bufmgr_gem->has_wait_timeout) {
1570 DBG("%s:%d: Timed wait is not supported. Falling back to "
1571 "infinite wait\n", __FILE__, __LINE__);
1573 drm_intel_gem_bo_wait_rendering(bo);
1576 return drm_intel_gem_bo_busy(bo) ? -ETIME : 0;
1580 wait.bo_handle = bo_gem->gem_handle;
1581 wait.timeout_ns = timeout_ns;
1583 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
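
/* Usage sketch (ours): a bounded wait. -ETIME means the bo was still busy
 * when the 1 ms budget expired; a negative timeout would wait forever.
 */
#if 0
static bool
example_bo_idle_within_1ms(drm_intel_bo *bo)
{
	return drm_intel_gem_bo_wait(bo, 1000 * 1000) == 0;
}
#endif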
1591 * Sets the object to the GTT read and possibly write domain, used by the X
1592 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
1594 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
1595 * can do tiled pixmaps this way.
1598 drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
1600 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1601 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1602 struct drm_i915_gem_set_domain set_domain;
1605 VG_CLEAR(set_domain);
1606 set_domain.handle = bo_gem->gem_handle;
1607 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1608 set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
1609 ret = drmIoctl(bufmgr_gem->fd,
1610 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1613 DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
1614 __FILE__, __LINE__, bo_gem->gem_handle,
1615 set_domain.read_domains, set_domain.write_domain,
1621 drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
1623 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1626 free(bufmgr_gem->exec2_objects);
1627 free(bufmgr_gem->exec_objects);
1628 free(bufmgr_gem->exec_bos);
1629 free(bufmgr_gem->aub_filename);
1631 pthread_mutex_destroy(&bufmgr_gem->lock);
1633 /* Free any cached buffer objects we were going to reuse */
1634 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
1635 struct drm_intel_gem_bo_bucket *bucket =
1636 &bufmgr_gem->cache_bucket[i];
1637 drm_intel_bo_gem *bo_gem;
1639 while (!DRMLISTEMPTY(&bucket->head)) {
1640 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1641 bucket->head.next, head);
1642 DRMLISTDEL(&bo_gem->head);
1644 drm_intel_gem_bo_free(&bo_gem->bo);
1652 * Adds the target buffer to the validation list and adds the relocation
1653 * to the reloc_buffer's relocation list.
1655 * The relocation entry at the given offset must already contain the
1656 * precomputed relocation value, because the kernel will optimize out
1657 * the relocation entry write when the buffer hasn't moved from the
1658 * last known offset in target_bo.
1661 do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1662 drm_intel_bo *target_bo, uint32_t target_offset,
1663 uint32_t read_domains, uint32_t write_domain,
1666 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1667 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1668 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
1669 bool fenced_command;
1671 if (bo_gem->has_error)
1674 if (target_bo_gem->has_error) {
1675 bo_gem->has_error = true;
1679 /* We never use HW fences for rendering on 965+ */
1680 if (bufmgr_gem->gen >= 4)
1683 fenced_command = need_fence;
1684 if (target_bo_gem->tiling_mode == I915_TILING_NONE)
1687 /* Create a new relocation list if needed */
1688 if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
1691 /* Check overflow */
1692 assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
1695 assert(offset <= bo->size - 4);
1696 assert((write_domain & (write_domain - 1)) == 0);
1698 /* Make sure that we're not adding a reloc to something whose size has
1699 * already been accounted for.
1701 assert(!bo_gem->used_as_reloc_target);
1702 if (target_bo_gem != bo_gem) {
1703 target_bo_gem->used_as_reloc_target = true;
1704 bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
1706 /* An object needing a fence is a tiled buffer, so it won't have
1707 * relocs to other buffers.
1710 target_bo_gem->reloc_tree_fences = 1;
1711 bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
1713 bo_gem->relocs[bo_gem->reloc_count].offset = offset;
1714 bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
1715 bo_gem->relocs[bo_gem->reloc_count].target_handle =
1716 target_bo_gem->gem_handle;
1717 bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
1718 bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
1719 bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64;
1721 bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
1722 if (target_bo != bo)
1723 drm_intel_gem_bo_reference(target_bo);
1725 bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
1726 DRM_INTEL_RELOC_FENCE;
1728 bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
1730 bo_gem->reloc_count++;
1736 drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1737 drm_intel_bo *target_bo, uint32_t target_offset,
1738 uint32_t read_domains, uint32_t write_domain)
1740 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
1742 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1743 read_domains, write_domain,
1744 !bufmgr_gem->fenced_relocs);
1748 drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
1749 drm_intel_bo *target_bo,
1750 uint32_t target_offset,
1751 uint32_t read_domains, uint32_t write_domain)
1753 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1754 read_domains, write_domain, true);
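
/* Usage sketch (ours): emitting a relocation while building a batch. The
 * caller must already have written the presumed target address at 'offset'
 * in the batch contents, so the kernel can skip the fixup when the target
 * has not moved.
 */
#if 0
static int
example_emit_reloc(drm_intel_bo *batch, uint32_t offset, drm_intel_bo *target)
{
	return drm_intel_bo_emit_reloc(batch, offset, target, 0,
				       I915_GEM_DOMAIN_RENDER,
				       I915_GEM_DOMAIN_RENDER);
}
#endif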
1758 drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
1760 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1762 return bo_gem->reloc_count;
1766 * Removes existing relocation entries in the BO after "start".
1768 * This allows a user to avoid a two-step process for state setup with
1769 * counting up all the buffer objects and doing a
1770 * drm_intel_bufmgr_check_aperture_space() before emitting any of the
1771 * relocations for the state setup. Instead, save the state of the
1772	 * batchbuffer including drm_intel_gem_bo_get_reloc_count(), emit all the
1773 * state, and then check if it still fits in the aperture.
1775 * Any further drm_intel_bufmgr_check_aperture_space() queries
1776 * involving this buffer in the tree are undefined after this call.
1779 drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
1781 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1783 struct timespec time;
1785 clock_gettime(CLOCK_MONOTONIC, &time);
1787 assert(bo_gem->reloc_count >= start);
1788 /* Unreference the cleared target buffers */
1789 for (i = start; i < bo_gem->reloc_count; i++) {
1790 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->reloc_target_info[i].bo;
1791 if (&target_bo_gem->bo != bo) {
1792 bo_gem->reloc_tree_fences -= target_bo_gem->reloc_tree_fences;
1793 drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo,
1797 bo_gem->reloc_count = start;
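
/* Usage sketch (ours): the single-pass pattern this enables. Record the
 * reloc count, emit state speculatively, then roll the relocations back if
 * the batch no longer fits in the aperture.
 */
#if 0
static int
example_emit_state_checked(drm_intel_bo *batch)
{
	int start = drm_intel_gem_bo_get_reloc_count(batch);

	/* ... emit state and relocations into 'batch' ... */

	if (drm_intel_bufmgr_check_aperture_space(&batch, 1) != 0) {
		drm_intel_gem_bo_clear_relocs(batch, start);
		return -ENOSPC;
	}
	return 0;
}
#endif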
1801 * Walk the tree of relocations rooted at BO and accumulate the list of
1802 * validations to be performed and update the relocation buffers with
1803 * index values into the validation list.
1806 drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
1808 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1811 if (bo_gem->relocs == NULL)
1814 for (i = 0; i < bo_gem->reloc_count; i++) {
1815 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
1817 if (target_bo == bo)
1820 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1822 /* Continue walking the tree depth-first. */
1823 drm_intel_gem_bo_process_reloc(target_bo);
1825 /* Add the target to the validate list */
1826 drm_intel_add_validate_buffer(target_bo);
1831 drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
1833 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1836 if (bo_gem->relocs == NULL)
1839 for (i = 0; i < bo_gem->reloc_count; i++) {
1840 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
1843 if (target_bo == bo)
1846 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1848 /* Continue walking the tree depth-first. */
1849 drm_intel_gem_bo_process_reloc2(target_bo);
1851 need_fence = (bo_gem->reloc_target_info[i].flags &
1852 DRM_INTEL_RELOC_FENCE);
1854 /* Add the target to the validate list */
1855 drm_intel_add_validate_buffer2(target_bo, need_fence);
1861 drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
1865 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1866 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1867 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1869 /* Update the buffer offset */
1870 if (bufmgr_gem->exec_objects[i].offset != bo->offset64) {
1871 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1872 bo_gem->gem_handle, bo_gem->name, bo->offset64,
1873 (unsigned long long)bufmgr_gem->exec_objects[i].
1875 bo->offset64 = bufmgr_gem->exec_objects[i].offset;
1876 bo->offset = bufmgr_gem->exec_objects[i].offset;
1882 drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
1886 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1887 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1888 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1890 /* Update the buffer offset */
1891 if (bufmgr_gem->exec2_objects[i].offset != bo->offset64) {
1892 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1893 bo_gem->gem_handle, bo_gem->name, bo->offset64,
1894 (unsigned long long)bufmgr_gem->exec2_objects[i].offset);
1895 bo->offset64 = bufmgr_gem->exec2_objects[i].offset;
1896 bo->offset = bufmgr_gem->exec2_objects[i].offset;
1902 aub_out(drm_intel_bufmgr_gem *bufmgr_gem, uint32_t data)
1904 fwrite(&data, 1, 4, bufmgr_gem->aub_file);
1908 aub_out_data(drm_intel_bufmgr_gem *bufmgr_gem, void *data, size_t size)
1910 fwrite(data, 1, size, bufmgr_gem->aub_file);
1914 aub_write_bo_data(drm_intel_bo *bo, uint32_t offset, uint32_t size)
1916 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1917 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1921 data = malloc(bo->size);
1922 drm_intel_bo_get_subdata(bo, offset, size, data);
1924 /* Easy mode: write out bo with no relocations */
1925 if (!bo_gem->reloc_count) {
1926 aub_out_data(bufmgr_gem, data, size);
1931 /* Otherwise, handle the relocations while writing. */
1932 for (i = 0; i < size / 4; i++) {
1934 for (r = 0; r < bo_gem->reloc_count; r++) {
1935 struct drm_i915_gem_relocation_entry *reloc;
1936 drm_intel_reloc_target *info;
1938 reloc = &bo_gem->relocs[r];
1939 info = &bo_gem->reloc_target_info[r];
1941 if (reloc->offset == offset + i * 4) {
1942 drm_intel_bo_gem *target_gem;
1945 target_gem = (drm_intel_bo_gem *)info->bo;
1948 val += target_gem->aub_offset;
1950 aub_out(bufmgr_gem, val);
1955 if (r == bo_gem->reloc_count) {
1956 /* no relocation, just the data */
1957 aub_out(bufmgr_gem, data[i]);
1965 aub_bo_get_address(drm_intel_bo *bo)
1967 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1968 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1970 /* Give the object a graphics address in the AUB file. We
1971 * don't just use the GEM object address because we do AUB
1972 * dumping before execution -- we want to successfully log
1973 * when the hardware might hang, and we might even want to aub
1974 * capture for a driver trying to execute on a different
1975 * generation of hardware by disabling the actual kernel exec
1978 bo_gem->aub_offset = bufmgr_gem->aub_offset;
1979 bufmgr_gem->aub_offset += bo->size;
1980 /* XXX: Handle aperture overflow. */
1981 assert(bufmgr_gem->aub_offset < 256 * 1024 * 1024);
1985 aub_write_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype,
1986 uint32_t offset, uint32_t size)
1988 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1989 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1992 CMD_AUB_TRACE_HEADER_BLOCK |
1993 ((bufmgr_gem->gen >= 8 ? 6 : 5) - 2));
1995 AUB_TRACE_MEMTYPE_GTT | type | AUB_TRACE_OP_DATA_WRITE);
1996 aub_out(bufmgr_gem, subtype);
1997 aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
1998 aub_out(bufmgr_gem, size);
1999 if (bufmgr_gem->gen >= 8)
2000 aub_out(bufmgr_gem, 0);
2001 aub_write_bo_data(bo, offset, size);
2005	 * Break up large objects into multiple writes. Otherwise a 128KB VBO
2006	 * would overflow the 16-bit size field in the packet header and
2007	 * everything goes badly after that.
2010 aub_write_large_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype,
2011 uint32_t offset, uint32_t size)
2013 uint32_t block_size;
2014 uint32_t sub_offset;
2016 for (sub_offset = 0; sub_offset < size; sub_offset += block_size) {
2017 block_size = size - sub_offset;
2019 if (block_size > 8 * 4096)
2020 block_size = 8 * 4096;
2022 aub_write_trace_block(bo, type, subtype, offset + sub_offset,
2028 aub_write_bo(drm_intel_bo *bo)
2030 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2031 uint32_t offset = 0;
2034 aub_bo_get_address(bo);
2036 /* Write out each annotated section separately. */
2037 for (i = 0; i < bo_gem->aub_annotation_count; ++i) {
2038 drm_intel_aub_annotation *annotation =
2039 &bo_gem->aub_annotations[i];
2040 uint32_t ending_offset = annotation->ending_offset;
2041 if (ending_offset > bo->size)
2042 ending_offset = bo->size;
2043 if (ending_offset > offset) {
2044 aub_write_large_trace_block(bo, annotation->type,
2045 annotation->subtype,
2047 ending_offset - offset);
2048 offset = ending_offset;
2052 /* Write out any remaining unannotated data */
2053 if (offset < bo->size) {
2054 aub_write_large_trace_block(bo, AUB_TRACE_TYPE_NOTYPE, 0,
2055 offset, bo->size - offset);
2060	 * Make a ring buffer on the fly and dump it
2063 aub_build_dump_ringbuffer(drm_intel_bufmgr_gem *bufmgr_gem,
2064 uint32_t batch_buffer, int ring_flag)
2066 uint32_t ringbuffer[4096];
2067 int ring = AUB_TRACE_TYPE_RING_PRB0; /* The default ring */
2070 if (ring_flag == I915_EXEC_BSD)
2071 ring = AUB_TRACE_TYPE_RING_PRB1;
2072 else if (ring_flag == I915_EXEC_BLT)
2073 ring = AUB_TRACE_TYPE_RING_PRB2;
2075 /* Make a ring buffer to execute our batchbuffer. */
2076 memset(ringbuffer, 0, sizeof(ringbuffer));
2077 if (bufmgr_gem->gen >= 8) {
2078 ringbuffer[ring_count++] = AUB_MI_BATCH_BUFFER_START | (3 - 2);
2079 ringbuffer[ring_count++] = batch_buffer;
2080 ringbuffer[ring_count++] = 0;
2082 ringbuffer[ring_count++] = AUB_MI_BATCH_BUFFER_START;
2083 ringbuffer[ring_count++] = batch_buffer;
2086 /* Write out the ring. This appears to trigger execution of
2087 * the ring in the simulator.
2090 CMD_AUB_TRACE_HEADER_BLOCK |
2091 ((bufmgr_gem->gen >= 8 ? 6 : 5) - 2));
2093 AUB_TRACE_MEMTYPE_GTT | ring | AUB_TRACE_OP_COMMAND_WRITE);
2094 aub_out(bufmgr_gem, 0); /* general/surface subtype */
2095 aub_out(bufmgr_gem, bufmgr_gem->aub_offset);
2096 aub_out(bufmgr_gem, ring_count * 4);
2097 if (bufmgr_gem->gen >= 8)
2098 aub_out(bufmgr_gem, 0);
2100 /* FIXME: Need some flush operations here? */
2101 aub_out_data(bufmgr_gem, ringbuffer, ring_count * 4);
2103 /* Update offset pointer */
2104 bufmgr_gem->aub_offset += 4096;
2108 drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
2109 int x1, int y1, int width, int height,
2110 enum aub_dump_bmp_format format,
2111 int pitch, int offset)
2113 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2114 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
2118 case AUB_DUMP_BMP_FORMAT_8BIT:
2121 case AUB_DUMP_BMP_FORMAT_ARGB_4444:
2124 case AUB_DUMP_BMP_FORMAT_ARGB_0888:
2125 case AUB_DUMP_BMP_FORMAT_ARGB_8888:
2129 printf("Unknown AUB dump format %d\n", format);
2133 if (!bufmgr_gem->aub_file)
2136 aub_out(bufmgr_gem, CMD_AUB_DUMP_BMP | 4);
2137 aub_out(bufmgr_gem, (y1 << 16) | x1);
2142 aub_out(bufmgr_gem, (height << 16) | width);
2143 aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
2145 ((bo_gem->tiling_mode != I915_TILING_NONE) ? (1 << 2) : 0) |
2146 ((bo_gem->tiling_mode == I915_TILING_Y) ? (1 << 3) : 0));
2150 aub_exec(drm_intel_bo *bo, int ring_flag, int used)
2152 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2153 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2155 bool batch_buffer_needs_annotations;
2157 if (!bufmgr_gem->aub_file)
2160 /* If batch buffer is not annotated, annotate it the best we
2163 batch_buffer_needs_annotations = bo_gem->aub_annotation_count == 0;
2164 if (batch_buffer_needs_annotations) {
2165 drm_intel_aub_annotation annotations[2] = {
2166 { AUB_TRACE_TYPE_BATCH, 0, used },
2167 { AUB_TRACE_TYPE_NOTYPE, 0, bo->size }
2169 drm_intel_bufmgr_gem_set_aub_annotations(bo, annotations, 2);
2172 /* Write out all buffers to AUB memory */
2173 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2174 aub_write_bo(bufmgr_gem->exec_bos[i]);
2177 /* Remove any annotations we added */
2178 if (batch_buffer_needs_annotations)
2179 drm_intel_bufmgr_gem_set_aub_annotations(bo, NULL, 0);
2181 /* Dump ring buffer */
2182 aub_build_dump_ringbuffer(bufmgr_gem, bo_gem->aub_offset, ring_flag);
2184 fflush(bufmgr_gem->aub_file);
2187	 * One frame has been dumped, so reset the aub_offset for the next frame.
2189 * FIXME: Can we do this?
2191 bufmgr_gem->aub_offset = 0x10000;
static int
drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
		      drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_execbuffer execbuf;
	int ret, i;

	if (bo_gem->has_error)
		return -ENOMEM;

	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Update indices and set up the validate list. */
	drm_intel_gem_bo_process_reloc(bo);

	/* Add the batch buffer to the validation list.  There are no
	 * relocations pointing to it.
	 */
	drm_intel_add_validate_buffer(bo);

	VG_CLEAR(execbuf);
	execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
	execbuf.buffer_count = bufmgr_gem->exec_count;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = used;
	execbuf.cliprects_ptr = (uintptr_t) cliprects;
	execbuf.num_cliprects = num_cliprects;
	execbuf.DR1 = 0;
	execbuf.DR4 = DR4;

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_EXECBUFFER,
		       &execbuf);
	if (ret != 0) {
		ret = -errno;
		if (errno == ENOSPC) {
			DBG("Execbuffer fails to pin. "
			    "Estimate: %u. Actual: %u. Available: %u\n",
			    drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
							       bufmgr_gem->exec_count),
			    drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
							      bufmgr_gem->exec_count),
			    (unsigned int)bufmgr_gem->gtt_size);
		}
	}
	drm_intel_update_buffer_offsets(bufmgr_gem);

	if (bufmgr_gem->bufmgr.debug)
		drm_intel_gem_dump_validation_list(bufmgr_gem);

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

		bo_gem->idle = false;

		/* Disconnect the buffer from the validate list */
		bo_gem->validate_index = -1;
		bufmgr_gem->exec_bos[i] = NULL;
	}
	bufmgr_gem->exec_count = 0;
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return ret;
}
static int
do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
	 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
	 unsigned int flags)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
	struct drm_i915_gem_execbuffer2 execbuf;
	int ret = 0;
	int i;

	switch (flags & 0x7) {
	default:
		return -EINVAL;
	case I915_EXEC_BLT:
		if (!bufmgr_gem->has_blt)
			return -EINVAL;
		break;
	case I915_EXEC_BSD:
		if (!bufmgr_gem->has_bsd)
			return -EINVAL;
		break;
	case I915_EXEC_VEBOX:
		if (!bufmgr_gem->has_vebox)
			return -EINVAL;
		break;
	case I915_EXEC_RENDER:
	case I915_EXEC_DEFAULT:
		break;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Update indices and set up the validate list. */
	drm_intel_gem_bo_process_reloc2(bo);

	/* Add the batch buffer to the validation list.  There are no relocations
	 * pointing to it.
	 */
	drm_intel_add_validate_buffer2(bo, 0);

	VG_CLEAR(execbuf);
	execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
	execbuf.buffer_count = bufmgr_gem->exec_count;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = used;
	execbuf.cliprects_ptr = (uintptr_t)cliprects;
	execbuf.num_cliprects = num_cliprects;
	execbuf.DR1 = 0;
	execbuf.DR4 = DR4;
	execbuf.flags = flags;
	if (ctx == NULL)
		i915_execbuffer2_set_context_id(execbuf, 0);
	else
		i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id);
	execbuf.rsvd2 = 0;

	aub_exec(bo, flags, used);

	if (bufmgr_gem->no_exec)
		goto skip_execution;

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_EXECBUFFER2,
		       &execbuf);
	if (ret != 0) {
		ret = -errno;
		if (ret == -ENOSPC) {
			DBG("Execbuffer fails to pin. "
			    "Estimate: %u. Actual: %u. Available: %u\n",
			    drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
							       bufmgr_gem->exec_count),
			    drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
							      bufmgr_gem->exec_count),
			    (unsigned int) bufmgr_gem->gtt_size);
		}
	}
	drm_intel_update_buffer_offsets2(bufmgr_gem);

skip_execution:
	if (bufmgr_gem->bufmgr.debug)
		drm_intel_gem_dump_validation_list(bufmgr_gem);

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

		bo_gem->idle = false;

		/* Disconnect the buffer from the validate list */
		bo_gem->validate_index = -1;
		bufmgr_gem->exec_bos[i] = NULL;
	}
	bufmgr_gem->exec_count = 0;
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return ret;
}

static int
drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
		       drm_clip_rect_t *cliprects, int num_cliprects,
		       int DR4)
{
	return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
			I915_EXEC_RENDER);
}

static int
drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
			   drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
			   unsigned int flags)
{
	return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
			flags);
}

drm_public int
drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
			      int used, unsigned int flags)
{
	return do_exec2(bo, used, ctx, NULL, 0, 0, flags);
}
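/*
 * Usage sketch (illustrative only): submitting a batch to the blitter ring
 * via the multi-ring entry point, falling back to the render ring when the
 * kernel lacks BLT support.  do_exec2() returns -EINVAL for an unsupported
 * ring.  "batch_bo" and "used_bytes" are hypothetical caller state.
 *
 *	ret = drm_intel_gem_bo_mrb_exec2(batch_bo, used_bytes,
 *					 NULL, 0, 0, I915_EXEC_BLT);
 *	if (ret == -EINVAL)
 *		ret = drm_intel_gem_bo_mrb_exec2(batch_bo, used_bytes,
 *						 NULL, 0, 0, I915_EXEC_RENDER);
 */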
static int
drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_pin pin;
	int ret;

	VG_CLEAR(pin);
	pin.handle = bo_gem->gem_handle;
	pin.alignment = alignment;

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_PIN,
		       &pin);
	if (ret != 0)
		return -errno;

	bo->offset64 = pin.offset;
	bo->offset = pin.offset;
	return 0;
}

static int
drm_intel_gem_bo_unpin(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_unpin unpin;
	int ret;

	VG_CLEAR(unpin);
	unpin.handle = bo_gem->gem_handle;

	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
	if (ret != 0)
		return -errno;

	return 0;
}

static int
drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
				     uint32_t tiling_mode,
				     uint32_t stride)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_tiling set_tiling;
	int ret;

	if (bo_gem->global_name == 0 &&
	    tiling_mode == bo_gem->tiling_mode &&
	    stride == bo_gem->stride)
		return 0;

	memset(&set_tiling, 0, sizeof(set_tiling));
	do {
		/* set_tiling is slightly broken and overwrites the
		 * input on the error path, so we have to open code
		 * drmIoctl.
		 */
		set_tiling.handle = bo_gem->gem_handle;
		set_tiling.tiling_mode = tiling_mode;
		set_tiling.stride = stride;

		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_I915_GEM_SET_TILING,
			    &set_tiling);
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));
	if (ret == -1)
		return -errno;

	bo_gem->tiling_mode = set_tiling.tiling_mode;
	bo_gem->swizzle_mode = set_tiling.swizzle_mode;
	bo_gem->stride = set_tiling.stride;
	return 0;
}

static int
drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
			    uint32_t stride)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int ret;

	/* Linear buffers have no stride. By ensuring that we only ever use
	 * stride 0 with linear buffers, we simplify our code.
	 */
	if (*tiling_mode == I915_TILING_NONE)
		stride = 0;

	ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
	if (ret == 0)
		drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

	*tiling_mode = bo_gem->tiling_mode;
	return ret;
}

static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
			    uint32_t * swizzle_mode)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	*tiling_mode = bo_gem->tiling_mode;
	*swizzle_mode = bo_gem->swizzle_mode;
	return 0;
}
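/*
 * Usage sketch (illustrative only): requesting X tiling through the public
 * wrapper.  The kernel may refuse or adjust the request, so the caller has
 * to re-read *tiling_mode afterwards; "bo", "pitch" and fallback_to_linear()
 * are hypothetical caller state.
 *
 *	uint32_t tiling = I915_TILING_X;
 *	drm_intel_bo_set_tiling(bo, &tiling, pitch);
 *	if (tiling != I915_TILING_X)
 *		fallback_to_linear(bo);
 */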
drm_public drm_intel_bo *
drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	int ret;
	uint32_t handle;
	drm_intel_bo_gem *bo_gem;
	struct drm_i915_gem_get_tiling get_tiling;
	drmMMListHead *list;

	ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);

	/*
	 * See if the kernel has already returned this buffer to us. Just as
	 * for named buffers, we must not create two bo's pointing at the same
	 * kernel object.
	 */
	for (list = bufmgr_gem->named.next;
	     list != &bufmgr_gem->named;
	     list = list->next) {
		bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
		if (bo_gem->gem_handle == handle) {
			drm_intel_gem_bo_reference(&bo_gem->bo);
			return &bo_gem->bo;
		}
	}

	if (ret) {
		fprintf(stderr, "drmPrimeFDToHandle failed: ret %d, errno %d\n",
			ret, errno);
		return NULL;
	}

	bo_gem = calloc(1, sizeof(*bo_gem));
	if (!bo_gem)
		return NULL;

	/* Determine size of bo.  The fd-to-handle ioctl really should
	 * return the size, but it doesn't.  If we have kernel 3.12 or
	 * later, we can lseek on the prime fd to get the size.  Older
	 * kernels will just fail, in which case we fall back to the
	 * provided (estimated or guessed) size. */
	ret = lseek(prime_fd, 0, SEEK_END);
	if (ret != -1)
		bo_gem->bo.size = ret;
	else
		bo_gem->bo.size = size;

	bo_gem->bo.handle = handle;
	bo_gem->bo.bufmgr = bufmgr;

	bo_gem->gem_handle = handle;

	atomic_set(&bo_gem->refcount, 1);

	bo_gem->name = "prime";
	bo_gem->validate_index = -1;
	bo_gem->reloc_tree_fences = 0;
	bo_gem->used_as_reloc_target = false;
	bo_gem->has_error = false;
	bo_gem->reusable = false;

	DRMINITLISTHEAD(&bo_gem->vma_list);
	DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);

	VG_CLEAR(get_tiling);
	get_tiling.handle = bo_gem->gem_handle;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_GET_TILING,
		       &get_tiling);
	if (ret != 0) {
		drm_intel_gem_bo_unreference(&bo_gem->bo);
		return NULL;
	}
	bo_gem->tiling_mode = get_tiling.tiling_mode;
	bo_gem->swizzle_mode = get_tiling.swizzle_mode;
	/* XXX stride is unknown */
	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

	return &bo_gem->bo;
}

drm_public int
drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	if (DRMLISTEMPTY(&bo_gem->name_list))
		DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);

	if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle,
			       DRM_CLOEXEC, prime_fd) != 0)
		return -errno;

	bo_gem->reusable = false;

	return 0;
}

static int
drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int ret;

	if (!bo_gem->global_name) {
		struct drm_gem_flink flink;

		VG_CLEAR(flink);
		flink.handle = bo_gem->gem_handle;

		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
		if (ret != 0)
			return -errno;

		bo_gem->global_name = flink.name;
		bo_gem->reusable = false;

		if (DRMLISTEMPTY(&bo_gem->name_list))
			DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
	}

	*name = bo_gem->global_name;
	return 0;
}
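/*
 * Usage sketch (illustrative only): sharing a buffer with another process.
 * PRIME file descriptors are the preferred transport; flink names are
 * global and guessable, so they survive mainly for legacy users.  "bo",
 * "size" and send_fd_to_client() are hypothetical caller state.
 *
 *	int fd;
 *	if (drm_intel_bo_gem_export_to_prime(bo, &fd) == 0)
 *		send_fd_to_client(fd);
 *
 *	// importer side, after receiving fd over IPC:
 *	drm_intel_bo *shared =
 *		drm_intel_bo_gem_create_from_prime(bufmgr, fd, size);
 */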
/**
 * Enables unlimited caching of buffer objects for reuse.
 *
 * This is potentially very memory expensive, as the cache at each bucket
 * size is only bounded by how many buffers of that size we've managed to have
 * in flight at once.
 */
drm_public void
drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;

	bufmgr_gem->bo_reuse = true;
}

/**
 * Enable use of fenced reloc type.
 *
 * New code should enable this to avoid unnecessary fence register
 * allocation.  If this option is not enabled, all relocs will have a
 * fence register allocated.
 */
drm_public void
drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

	if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
		bufmgr_gem->fenced_relocs = true;
}
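/*
 * Usage sketch (illustrative only): a typical bufmgr setup enables reuse
 * right after initialization, trading memory for much cheaper bo
 * allocation; the batch size shown is just an example value.
 *
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
 *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 *	drm_intel_bufmgr_gem_enable_fenced_relocs(bufmgr);
 */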
/**
 * Return the additional aperture space required by the tree of buffer objects
 * rooted at bo.
 */
static int
drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;
	int total = 0;

	if (bo == NULL || bo_gem->included_in_check_aperture)
		return 0;

	total += bo->size;
	bo_gem->included_in_check_aperture = true;

	for (i = 0; i < bo_gem->reloc_count; i++)
		total +=
		    drm_intel_gem_bo_get_aperture_space(bo_gem->
							reloc_target_info[i].bo);

	return total;
}

/**
 * Count the number of buffers in this list that need a fence reg.
 *
 * If the count is greater than the number of available regs, we'll have
 * to ask the caller to resubmit a batch with fewer tiled buffers.
 *
 * This function over-counts if the same buffer is used multiple times.
 */
static unsigned int
drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];

		if (bo_gem == NULL)
			continue;

		total += bo_gem->reloc_tree_fences;
	}
	return total;
}

/**
 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
 * for the next drm_intel_bufmgr_check_aperture_space() call.
 */
static void
drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;

	if (bo == NULL || !bo_gem->included_in_check_aperture)
		return;

	bo_gem->included_in_check_aperture = false;

	for (i = 0; i < bo_gem->reloc_count; i++)
		drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
							   reloc_target_info[i].bo);
}
/**
 * Return a conservative estimate for the amount of aperture required
 * for a collection of buffers. This may double-count some buffers.
 */
static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];

		if (bo_gem != NULL)
			total += bo_gem->reloc_tree_size;
	}
	return total;
}

/**
 * Return the amount of aperture needed for a collection of buffers.
 * This avoids double counting any buffers, at the cost of looking
 * at every buffer in the set.
 */
static unsigned int
drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
		/* For the first buffer object in the array, we get an
		 * accurate count back for its reloc_tree size (since nothing
		 * had been flagged as being counted yet).  We can save that
		 * value out as a more conservative reloc_tree_size that
		 * avoids double-counting target buffers.  Since the first
		 * buffer happens to usually be the batch buffer in our
		 * callers, this can pull us back from doing the tree
		 * walk on every new batch emit.
		 */
		if (i == 0) {
			drm_intel_bo_gem *bo_gem =
			    (drm_intel_bo_gem *) bo_array[i];
			bo_gem->reloc_tree_size = total;
		}
	}

	for (i = 0; i < count; i++)
		drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
	return total;
}
/**
 * Return -ENOSPC if the batchbuffer should be flushed before attempting to
 * emit rendering referencing the buffers pointed to by bo_array.
 *
 * This is required because if we try to emit a batchbuffer with relocations
 * to a tree of buffers that won't simultaneously fit in the aperture,
 * the rendering will return an error at a point where the software is not
 * prepared to recover from it.
 *
 * However, we also want to emit the batchbuffer significantly before we reach
 * the limit, as a series of batchbuffers each of which references buffers
 * covering almost all of the aperture means that at each emit we end up
 * waiting to evict a buffer from the last rendering, and we get synchronous
 * performance.  By emitting smaller batchbuffers, we eat some CPU overhead to
 * get better parallelism.
 */
static int
drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
{
	drm_intel_bufmgr_gem *bufmgr_gem =
	    (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
	unsigned int total = 0;
	unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
	int total_fences;

	/* Check for fence reg constraints if necessary */
	if (bufmgr_gem->available_fences) {
		total_fences = drm_intel_gem_total_fences(bo_array, count);
		if (total_fences > bufmgr_gem->available_fences)
			return -ENOSPC;
	}

	total = drm_intel_gem_estimate_batch_space(bo_array, count);

	if (total > threshold)
		total = drm_intel_gem_compute_batch_space(bo_array, count);

	if (total > threshold) {
		DBG("check_space: overflowed available aperture, "
		    "%dkb vs %dkb\n",
		    total / 1024, (int)bufmgr_gem->gtt_size / 1024);
		return -ENOSPC;
	} else {
		DBG("drm_check_space: total %dkb vs bufmgr %dkb\n", total / 1024,
		    (int)bufmgr_gem->gtt_size / 1024);
		return 0;
	}
}
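/*
 * Usage sketch (illustrative only): the flush-and-retry pattern this check
 * is designed for.  "render_bos", "n" and flush_batch() are hypothetical
 * caller state; after the flush the validation list is empty, so the same
 * buffers are checked again against an empty batch.
 *
 *	if (drm_intel_bufmgr_check_aperture_space(render_bos, n) != 0)
 *		flush_batch();
 */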
/**
 * Disable buffer reuse for objects which are shared with the kernel
 * as scanout buffers.
 */
drm_public int
drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	bo_gem->reusable = false;
	return 0;
}

drm_public int
drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	return bo_gem->reusable;
}

static int
_drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;

	for (i = 0; i < bo_gem->reloc_count; i++) {
		if (bo_gem->reloc_target_info[i].bo == target_bo)
			return 1;
		if (bo == bo_gem->reloc_target_info[i].bo)
			continue;
		if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
						 target_bo))
			return 1;
	}

	return 0;
}

/** Return true if target_bo is referenced by bo's relocation tree. */
static int
drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;

	if (bo == NULL || target_bo == NULL)
		return 0;
	if (target_bo_gem->used_as_reloc_target)
		return _drm_intel_gem_bo_references(bo, target_bo);
	return 0;
}
static void
add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
{
	unsigned int i = bufmgr_gem->num_buckets;

	assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));

	DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
	bufmgr_gem->cache_bucket[i].size = size;
	bufmgr_gem->num_buckets++;
}

static void
init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
{
	unsigned long size, cache_max_size = 64 * 1024 * 1024;

	/* OK, so power of two buckets was too wasteful of memory.
	 * Give 3 other sizes between each power of two, to hopefully
	 * cover things accurately enough.  (The alternative is
	 * probably to just go for exact matching of sizes, and assume
	 * that for things like composited window resize the tiled
	 * width/height alignment and rounding of sizes to pages will
	 * get us useful cache hit rates anyway)
	 */
	add_bucket(bufmgr_gem, 4096);
	add_bucket(bufmgr_gem, 4096 * 2);
	add_bucket(bufmgr_gem, 4096 * 3);

	/* Initialize the linked lists for BO reuse cache. */
	for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
		add_bucket(bufmgr_gem, size);

		add_bucket(bufmgr_gem, size + size * 1 / 4);
		add_bucket(bufmgr_gem, size + size * 2 / 4);
		add_bucket(bufmgr_gem, size + size * 3 / 4);
	}
}
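/*
 * For reference, init_cache_buckets() above yields bucket sizes of (in
 * 4096-byte pages): 1, 2, 3, then 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24,
 * 28, ... up to the 64MB cap, i.e. each power of two plus three evenly
 * spaced intermediate sizes.
 */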
drm_public void
drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

	bufmgr_gem->vma_max = limit;

	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}
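/*
 * Usage sketch (illustrative only): capping the number of cached userspace
 * mappings.  This mainly matters for 32-bit processes, where address space
 * rather than memory is the scarce resource; the limit shown is arbitrary.
 *
 *	drm_intel_bufmgr_gem_set_vma_cache_size(bufmgr, 512);
 */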
/**
 * Get the PCI ID for the device.  This can be overridden by setting the
 * INTEL_DEVID_OVERRIDE environment variable to the desired ID.
 */
static int
get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
{
	char *devid_override;
	int devid;
	int ret;
	drm_i915_getparam_t gp;

	if (geteuid() == getuid()) {
		devid_override = getenv("INTEL_DEVID_OVERRIDE");
		if (devid_override) {
			bufmgr_gem->no_exec = true;
			return strtod(devid_override, NULL);
		}
	}

	VG_CLEAR(devid);
	VG_CLEAR(gp);
	gp.param = I915_PARAM_CHIPSET_ID;
	gp.value = &devid;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret) {
		fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
		fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
	}
	return devid;
}

drm_public int
drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

	return bufmgr_gem->pci_device;
}
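/*
 * Usage sketch (illustrative only): forcing the library to treat the device
 * as a different chipset, e.g. to generate AUB traces for hardware you do
 * not have.  The override also sets no_exec, so nothing is submitted to the
 * real GPU.  The device ID and binary name below are hypothetical.
 *
 *	$ INTEL_DEVID_OVERRIDE=0x0162 ./my_gl_app
 */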
/**
 * Sets the AUB filename.
 *
 * This function has to be called before drm_intel_bufmgr_gem_set_aub_dump()
 * for it to have any effect.
 */
drm_public void
drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
				      const char *filename)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

	free(bufmgr_gem->aub_filename);
	if (filename)
		bufmgr_gem->aub_filename = strdup(filename);
	else
		bufmgr_gem->aub_filename = NULL;
}
/**
 * Sets up AUB dumping.
 *
 * This is a trace file format that can be used with the simulator.
 * Packets are emitted in a format somewhat like GPU command packets.
 * You can set up a GTT and upload your objects into the referenced
 * space, then send off batchbuffers and get BMPs out the other end.
 */
drm_public void
drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
	int entry = 0x200003;
	int i;
	int gtt_size = 0x10000;
	const char *filename;

	if (!enable) {
		if (bufmgr_gem->aub_file) {
			fclose(bufmgr_gem->aub_file);
			bufmgr_gem->aub_file = NULL;
		}
		return;
	}

	if (geteuid() != getuid())
		return;

	if (bufmgr_gem->aub_filename)
		filename = bufmgr_gem->aub_filename;
	else
		filename = "intel.aub";
	bufmgr_gem->aub_file = fopen(filename, "w+");
	if (!bufmgr_gem->aub_file)
		return;

	/* Start allocating objects from just after the GTT. */
	bufmgr_gem->aub_offset = gtt_size;

	/* Start with a (required) version packet. */
	aub_out(bufmgr_gem, CMD_AUB_HEADER | (13 - 2));
	aub_out(bufmgr_gem,
		(4 << AUB_HEADER_MAJOR_SHIFT) |
		(0 << AUB_HEADER_MINOR_SHIFT));
	for (i = 0; i < 8; i++) {
		aub_out(bufmgr_gem, 0); /* app name */
	}
	aub_out(bufmgr_gem, 0); /* timestamp */
	aub_out(bufmgr_gem, 0); /* timestamp */
	aub_out(bufmgr_gem, 0); /* comment len */

	/* Set up the GTT. The max we can handle is 256M */
	aub_out(bufmgr_gem, CMD_AUB_TRACE_HEADER_BLOCK | ((bufmgr_gem->gen >= 8 ? 6 : 5) - 2));
	aub_out(bufmgr_gem, AUB_TRACE_MEMTYPE_NONLOCAL | 0 | AUB_TRACE_OP_DATA_WRITE);
	aub_out(bufmgr_gem, 0); /* subtype */
	aub_out(bufmgr_gem, 0); /* offset */
	aub_out(bufmgr_gem, gtt_size); /* size */
	if (bufmgr_gem->gen >= 8)
		aub_out(bufmgr_gem, 0);
	for (i = 0x000; i < gtt_size; i += 4, entry += 0x1000) {
		aub_out(bufmgr_gem, entry);
	}
}
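/*
 * Usage sketch (illustrative only): capturing one frame to an AUB trace.
 * The filename must be set before dumping is enabled for it to take
 * effect; draw_frame() is a hypothetical caller function.
 *
 *	drm_intel_bufmgr_gem_set_aub_filename(bufmgr, "frame.aub");
 *	drm_intel_bufmgr_gem_set_aub_dump(bufmgr, 1);
 *	draw_frame();		// every exec is now written to the trace
 *	drm_intel_bufmgr_gem_set_aub_dump(bufmgr, 0);
 */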
drm_public drm_intel_context *
drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
	struct drm_i915_gem_context_create create;
	drm_intel_context *context = NULL;
	int ret;

	context = calloc(1, sizeof(*context));
	if (!context)
		return NULL;

	VG_CLEAR(create);
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
	if (ret != 0) {
		DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
		    strerror(errno));
		free(context);
		return NULL;
	}

	context->ctx_id = create.ctx_id;
	context->bufmgr = bufmgr;

	return context;
}

drm_public void
drm_intel_gem_context_destroy(drm_intel_context *ctx)
{
	drm_intel_bufmgr_gem *bufmgr_gem;
	struct drm_i915_gem_context_destroy destroy;
	int ret;

	if (ctx == NULL)
		return;

	VG_CLEAR(destroy);

	bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
	destroy.ctx_id = ctx->ctx_id;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY,
		       &destroy);
	if (ret != 0)
		fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
			strerror(errno));

	free(ctx);
}
drm_public int
drm_intel_get_reset_stats(drm_intel_context *ctx,
			  uint32_t *reset_count,
			  uint32_t *active,
			  uint32_t *pending)
{
	drm_intel_bufmgr_gem *bufmgr_gem;
	struct drm_i915_reset_stats stats;
	int ret;

	if (ctx == NULL)
		return -EINVAL;

	memset(&stats, 0, sizeof(stats));

	bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
	stats.ctx_id = ctx->ctx_id;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GET_RESET_STATS,
		       &stats);
	if (ret == 0) {
		if (reset_count != NULL)
			*reset_count = stats.reset_count;

		if (active != NULL)
			*active = stats.batch_active;

		if (pending != NULL)
			*pending = stats.batch_pending;
	}

	return ret;
}
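/*
 * Usage sketch (illustrative only): polling reset statistics for a context,
 * e.g. to implement GL robustness-style context-loss reporting.
 * report_context_lost() is a hypothetical caller function.
 *
 *	uint32_t resets, active, pending;
 *	if (drm_intel_get_reset_stats(ctx, &resets, &active, &pending) == 0 &&
 *	    active > 0)
 *		report_context_lost();	// a batch from ctx was guilty
 */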
drm_public int
drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
		   uint32_t offset,
		   uint64_t *result)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
	struct drm_i915_reg_read reg_read;
	int ret;

	VG_CLEAR(reg_read);
	reg_read.offset = offset;

	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_REG_READ, &reg_read);

	*result = reg_read.val;
	return ret;
}
/**
 * Annotate the given bo for use in aub dumping.
 *
 * \param annotations is an array of drm_intel_aub_annotation objects
 * describing the type of data in various sections of the bo.  Each
 * element of the array specifies the type and subtype of a section of
 * the bo, and the past-the-end offset of that section.  The elements
 * of \c annotations must be sorted so that ending_offset is
 * monotonically increasing.
 *
 * \param count is the number of elements in the \c annotations array.
 * If \c count is zero, then \c annotations will not be dereferenced.
 *
 * Annotations are copied into a private data structure, so caller may
 * re-use the memory pointed to by \c annotations after the call
 * returns.
 *
 * Annotations are stored for the lifetime of the bo; to reset to the
 * default state (no annotations), call this function with a \c count
 * of zero.
 */
drm_public void
drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
					 drm_intel_aub_annotation *annotations,
					 unsigned count)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	unsigned size = sizeof(*annotations) * count;
	drm_intel_aub_annotation *new_annotations =
		count > 0 ? realloc(bo_gem->aub_annotations, size) : NULL;

	if (new_annotations == NULL) {
		free(bo_gem->aub_annotations);
		bo_gem->aub_annotations = NULL;
		bo_gem->aub_annotation_count = 0;
		return;
	}
	memcpy(new_annotations, annotations, size);
	bo_gem->aub_annotations = new_annotations;
	bo_gem->aub_annotation_count = count;
}
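/*
 * Usage sketch (illustrative only): annotating a batch buffer so the AUB
 * decoder can tell commands from padding, mirroring the fallback that
 * aub_exec() applies itself.  Offsets are past-the-end and must increase.
 * "bo" and "used_bytes" are hypothetical caller state.
 *
 *	drm_intel_aub_annotation notes[2] = {
 *		{ AUB_TRACE_TYPE_BATCH, 0, used_bytes },
 *		{ AUB_TRACE_TYPE_NOTYPE, 0, bo->size },
 *	};
 *	drm_intel_bufmgr_gem_set_aub_annotations(bo, notes, 2);
 */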
static pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static drmMMListHead bufmgr_list = { &bufmgr_list, &bufmgr_list };

static drm_intel_bufmgr_gem *
drm_intel_bufmgr_gem_find(int fd)
{
	drm_intel_bufmgr_gem *bufmgr_gem;

	DRMLISTFOREACHENTRY(bufmgr_gem, &bufmgr_list, managers) {
		if (bufmgr_gem->fd == fd) {
			atomic_inc(&bufmgr_gem->refcount);
			return bufmgr_gem;
		}
	}

	return NULL;
}

drm_public void
drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

	if (atomic_add_unless(&bufmgr_gem->refcount, -1, 1)) {
		pthread_mutex_lock(&bufmgr_list_mutex);

		if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
			DRMLISTDEL(&bufmgr_gem->managers);
			drm_intel_bufmgr_gem_destroy(bufmgr);
		}

		pthread_mutex_unlock(&bufmgr_list_mutex);
	}
}
/**
 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
 * and manage buffer objects.
 *
 * \param fd File descriptor of the opened DRM device.
 */
drm_public drm_intel_bufmgr *
drm_intel_bufmgr_gem_init(int fd, int batch_size)
{
	drm_intel_bufmgr_gem *bufmgr_gem;
	struct drm_i915_gem_get_aperture aperture;
	drm_i915_getparam_t gp;
	int ret, tmp;
	bool exec2 = false;

	pthread_mutex_lock(&bufmgr_list_mutex);

	bufmgr_gem = drm_intel_bufmgr_gem_find(fd);
	if (bufmgr_gem)
		goto exit;

	bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
	if (bufmgr_gem == NULL)
		goto exit;

	bufmgr_gem->fd = fd;
	atomic_set(&bufmgr_gem->refcount, 1);

	if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
		free(bufmgr_gem);
		bufmgr_gem = NULL;
		goto exit;
	}

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_GET_APERTURE,
		       &aperture);

	if (ret == 0)
		bufmgr_gem->gtt_size = aperture.aper_available_size;
	else {
		fprintf(stderr, "DRM_IOCTL_I915_GEM_APERTURE failed: %s\n",
			strerror(errno));
		bufmgr_gem->gtt_size = 128 * 1024 * 1024;
		fprintf(stderr, "Assuming %dkB available aperture size.\n"
			"May lead to reduced performance or incorrect "
			"rendering.\n",
			(int)bufmgr_gem->gtt_size / 1024);
	}

	bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);

	if (IS_GEN2(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 2;
	else if (IS_GEN3(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 3;
	else if (IS_GEN4(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 4;
	else if (IS_GEN5(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 5;
	else if (IS_GEN6(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 6;
	else if (IS_GEN7(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 7;
	else if (IS_GEN8(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 8;
	else {
		free(bufmgr_gem);
		bufmgr_gem = NULL;
		goto exit;
	}

	if (IS_GEN3(bufmgr_gem->pci_device) &&
	    bufmgr_gem->gtt_size > 256*1024*1024) {
		/* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
		 * be used for tiled blits. To simplify the accounting, just
		 * subtract the unmappable part (fixed to 256MB on all known
		 * gen3 devices) if the kernel advertises it. */
		bufmgr_gem->gtt_size -= 256*1024*1024;
	}

	VG_CLEAR(gp);
	gp.value = &tmp;

	gp.param = I915_PARAM_HAS_EXECBUF2;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (!ret)
		exec2 = true;

	gp.param = I915_PARAM_HAS_BSD;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_bsd = ret == 0;

	gp.param = I915_PARAM_HAS_BLT;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_blt = ret == 0;

	gp.param = I915_PARAM_HAS_RELAXED_FENCING;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_relaxed_fencing = ret == 0;

	gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_wait_timeout = ret == 0;

	gp.param = I915_PARAM_HAS_LLC;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret != 0) {
		/* Kernel does not support the HAS_LLC query; fall back to GPU
		 * generation detection and assume that we have LLC on GEN6/7.
		 */
		bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) |
				       IS_GEN7(bufmgr_gem->pci_device));
	} else
		bufmgr_gem->has_llc = *gp.value;

	gp.param = I915_PARAM_HAS_VEBOX;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_vebox = (ret == 0) && (*gp.value > 0);

	if (bufmgr_gem->gen < 4) {
		gp.param = I915_PARAM_NUM_FENCES_AVAIL;
		gp.value = &bufmgr_gem->available_fences;
		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
		if (ret) {
			fprintf(stderr, "get fences failed: %d [%d]\n", ret,
				errno);
			fprintf(stderr, "param: %d, val: %d\n", gp.param,
				*gp.value);
			bufmgr_gem->available_fences = 0;
		} else {
			/* XXX The kernel reports the total number of fences,
			 * including any that may be pinned.
			 *
			 * We presume that there will be at least one pinned
			 * fence for the scanout buffer, but there may be more
			 * than one scanout and the user may be manually
			 * pinning buffers. Let's move to execbuffer2 and
			 * thereby forget the insanity of using fences...
			 */
			bufmgr_gem->available_fences -= 2;
			if (bufmgr_gem->available_fences < 0)
				bufmgr_gem->available_fences = 0;
		}
	}

	/* Let's go with one relocation per every 2 dwords (but round down a bit
	 * since a power of two will mean an extra page allocation for the reloc
	 * buffer).
	 *
	 * Every 4 was too few for the blender benchmark.
	 */
	bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;

	bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
	bufmgr_gem->bufmgr.bo_alloc_for_render =
	    drm_intel_gem_bo_alloc_for_render;
	bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
	bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
	bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
	bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
	bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
	bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
	bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
	bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
	bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
	bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
	bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
	bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
	bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
	bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
	bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
	/* Use the new one if available */
	if (exec2) {
		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
		bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
	} else
		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
	bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
	bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
	bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref;
	bufmgr_gem->bufmgr.debug = 0;
	bufmgr_gem->bufmgr.check_aperture_space =
	    drm_intel_gem_check_aperture_space;
	bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
	bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
	bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
	    drm_intel_gem_get_pipe_from_crtc_id;
	bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;

	DRMINITLISTHEAD(&bufmgr_gem->named);
	init_cache_buckets(bufmgr_gem);

	DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
	bufmgr_gem->vma_max = -1; /* unlimited by default */

	DRMLISTADD(&bufmgr_gem->managers, &bufmgr_list);

exit:
	pthread_mutex_unlock(&bufmgr_list_mutex);

	return bufmgr_gem != NULL ? &bufmgr_gem->bufmgr : NULL;
}
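/*
 * Usage sketch (illustrative only): bringing up the GEM bufmgr on an open
 * DRM fd.  Repeated calls with the same fd share one manager; each caller
 * balances its reference with drm_intel_bufmgr_destroy(), which routes to
 * drm_intel_bufmgr_gem_unref().  The device path and batch size are
 * hypothetical.
 *
 *	int fd = open("/dev/dri/card0", O_RDWR);
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
 *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 *	// ... allocate bos, build and exec batches ...
 *	drm_intel_bufmgr_destroy(bufmgr);
 */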