1 /**************************************************************************
3 * Copyright © 2007 Red Hat Inc.
4 * Copyright © 2007-2012 Intel Corporation
5 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions
29 **************************************************************************/
31 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
32 * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
33 * Eric Anholt <eric@anholt.net>
34 * Dave Airlie <airlied@linux.ie>
42 #include <xf86atomic.h>
50 #include <sys/ioctl.h>
52 #include <sys/types.h>
57 #define ETIME ETIMEDOUT
59 #include "libdrm_macros.h"
60 #include "libdrm_lists.h"
61 #include "intel_bufmgr.h"
62 #include "intel_bufmgr_priv.h"
63 #include "intel_chipset.h"
76 #define memclear(s) memset(&s, 0, sizeof(s))
78 #define DBG(...) do { \
79 if (bufmgr_gem->bufmgr.debug) \
80 fprintf(stderr, __VA_ARGS__); \
83 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
84 #define MAX2(A, B) ((A) > (B) ? (A) : (B))
87 * upper_32_bits - return bits 32-63 of a number
88 * @n: the number we're accessing
90 * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress
91 * the "right shift count >= width of type" warning when that quantity is
94 #define upper_32_bits(n) ((__u32)(((n) >> 16) >> 16))
97 * lower_32_bits - return bits 0-31 of a number
98 * @n: the number we're accessing
100 #define lower_32_bits(n) ((__u32)(n))
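/*
 * Worked example (illustrative only): splitting a 64-bit GTT offset for
 * the debug output in drm_intel_gem_dump_validation_list() below:
 *
 *   uint64_t offset64 = 0x0000000123456000ull;
 *   upper_32_bits(offset64);   becomes 0x00000001
 *   lower_32_bits(offset64);   becomes 0x23456000
 */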
102 typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
104 struct drm_intel_gem_bo_bucket {
109 typedef struct _drm_intel_bufmgr_gem {
110 drm_intel_bufmgr bufmgr;
118 pthread_mutex_t lock;
120 struct drm_i915_gem_exec_object *exec_objects;
121 struct drm_i915_gem_exec_object2 *exec2_objects;
122 drm_intel_bo **exec_bos;
126 /** Array of lists of cached gem objects of power-of-two sizes */
127 struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
131 drmMMListHead managers;
134 drmMMListHead vma_cache;
135 int vma_count, vma_open, vma_max;
138 int available_fences;
141 unsigned int has_bsd : 1;
142 unsigned int has_blt : 1;
143 unsigned int has_relaxed_fencing : 1;
144 unsigned int has_llc : 1;
145 unsigned int has_wait_timeout : 1;
146 unsigned int bo_reuse : 1;
147 unsigned int no_exec : 1;
148 unsigned int has_vebox : 1;
156 } drm_intel_bufmgr_gem;
158 #define DRM_INTEL_RELOC_FENCE (1<<0)
160 typedef struct _drm_intel_reloc_target_info {
163 } drm_intel_reloc_target;
165 struct _drm_intel_bo_gem {
173 * Kernel-assigned global name for this object
175 * List contains both flink named and prime fd'd objects
177 unsigned int global_name;
178 drmMMListHead name_list;
181 * Index of the buffer within the validation list while preparing a
182 * batchbuffer execution.
187 * Current tiling mode
189 uint32_t tiling_mode;
190 uint32_t swizzle_mode;
191 unsigned long stride;
195 /** Array passed to the DRM containing relocation information. */
196 struct drm_i915_gem_relocation_entry *relocs;
198 * Array of info structs corresponding to relocs[i].target_handle etc
200 drm_intel_reloc_target *reloc_target_info;
201 /** Number of entries in relocs */
203 /** Mapped address for the buffer, saved across map/unmap cycles */
205 /** GTT virtual address for the buffer, saved across map/unmap cycles */
208 * Virtual address of the buffer allocated by user, used for userptr
213 drmMMListHead vma_list;
219 * Boolean of whether this BO and its children have been included in
220 * the current drm_intel_bufmgr_check_aperture_space() total.
222 bool included_in_check_aperture;
225 * Boolean of whether this buffer has been used as a relocation
226 * target and had its size accounted for, and thus can't have any
227 * further relocations added to it.
229 bool used_as_reloc_target;
232 * Boolean of whether we have encountered an error whilst building the relocation tree.
237 * Boolean of whether this buffer can be re-used
242 * Boolean of whether the GPU is definitely not accessing the buffer.
244 * This is only valid when reusable, since non-reusable
245 * buffers are those that have been shared with other
246 * processes, so we don't know their state.
251 * Boolean of whether this buffer was allocated with userptr
256 * Boolean of whether this buffer can be placed in the full 48-bit
257 * address range on gen8+.
259 * By default, buffers will be kept in a 32-bit range, unless this
260 * flag is explicitly set.
262 bool use_48b_address_range;
265 * Size in bytes of this buffer and its relocation descendants.
267 * Used to avoid costly tree walking in
268 * drm_intel_bufmgr_check_aperture in the common case.
273 * Number of potential fence registers required by this buffer and its
276 int reloc_tree_fences;
278 /** Flags that we may need to do the SW_FINISH ioctl on unmap. */
279 bool mapped_cpu_write;
283 drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);
286 drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);
289 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
290 uint32_t * swizzle_mode);
293 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
294 uint32_t tiling_mode,
297 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
300 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);
302 static void drm_intel_gem_bo_free(drm_intel_bo *bo);
304 static inline drm_intel_bo_gem *to_bo_gem(drm_intel_bo *bo)
306 return (drm_intel_bo_gem *)bo;
310 drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
311 uint32_t *tiling_mode)
313 unsigned long min_size, max_size;
316 if (*tiling_mode == I915_TILING_NONE)
319 /* 965+ just need multiples of page size for tiling */
320 if (bufmgr_gem->gen >= 4)
321 return ROUND_UP_TO(size, 4096);
323 /* Older chips need powers of two, of at least 512k or 1M */
324 if (bufmgr_gem->gen == 3) {
325 min_size = 1024*1024;
326 max_size = 128*1024*1024;
329 max_size = 64*1024*1024;
332 if (size > max_size) {
333 *tiling_mode = I915_TILING_NONE;
337 /* Do we need to allocate every page for the fence? */
338 if (bufmgr_gem->has_relaxed_fencing)
339 return ROUND_UP_TO(size, 4096);
341 for (i = min_size; i < size; i <<= 1)
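/*
 * Worked example (illustrative): a 3 MiB X-tiled request on gen3 without
 * relaxed fencing doubles through 1 MiB -> 2 MiB -> 4 MiB, so the loop
 * above yields 4 MiB; the same request on gen4+ is simply rounded up to
 * the next 4096-byte page boundary.
 */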
348 * Round a given pitch up to the minimum required for X tiling on a
349 * given chip. We use 512 as the minimum to allow for a later tiling
353 drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
354 unsigned long pitch, uint32_t *tiling_mode)
356 unsigned long tile_width;
359 /* If untiled, then just align it so that we can do rendering
360 * to it with the 3D engine.
362 if (*tiling_mode == I915_TILING_NONE)
363 return ALIGN(pitch, 64);
365 if (*tiling_mode == I915_TILING_X
366 || (IS_915(bufmgr_gem->pci_device)
367 && *tiling_mode == I915_TILING_Y))
372 /* 965 is flexible */
373 if (bufmgr_gem->gen >= 4)
374 return ROUND_UP_TO(pitch, tile_width);
376 /* The older hardware has a maximum pitch of 8192 with tiled
377 * surfaces, so fallback to untiled if it's too large.
380 *tiling_mode = I915_TILING_NONE;
381 return ALIGN(pitch, 64);
384 /* Pre-965 needs power of two tile width */
385 for (i = tile_width; i < pitch; i <<= 1)
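/*
 * Worked example (illustrative, assuming the usual 512-byte X-tile
 * width): a requested pitch of 1536 bytes stays 1536 on gen4+ (already a
 * multiple of the tile width), while the pre-965 power-of-two loop above
 * rounds it up to 2048.
 */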
391 static struct drm_intel_gem_bo_bucket *
392 drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
397 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
398 struct drm_intel_gem_bo_bucket *bucket =
399 &bufmgr_gem->cache_bucket[i];
400 if (bucket->size >= size) {
409 drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
413 for (i = 0; i < bufmgr_gem->exec_count; i++) {
414 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
415 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
417 if (bo_gem->relocs == NULL) {
418 DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
423 for (j = 0; j < bo_gem->reloc_count; j++) {
424 drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
425 drm_intel_bo_gem *target_gem =
426 (drm_intel_bo_gem *) target_bo;
428 DBG("%2d: %d (%s)@0x%08x %08x -> "
429 "%d (%s)@0x%08x %08x + 0x%08x\n",
431 bo_gem->gem_handle, bo_gem->name,
432 upper_32_bits(bo_gem->relocs[j].offset),
433 lower_32_bits(bo_gem->relocs[j].offset),
434 target_gem->gem_handle,
436 upper_32_bits(target_bo->offset64),
437 lower_32_bits(target_bo->offset64),
438 bo_gem->relocs[j].delta);
444 drm_intel_gem_bo_reference(drm_intel_bo *bo)
446 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
448 atomic_inc(&bo_gem->refcount);
452 * Adds the given buffer to the list of buffers to be validated (moved into the
453 * appropriate memory type) with the next batch submission.
455 * If a buffer is validated multiple times in a batch submission, it ends up
456 * with the intersection of the memory type flags and the union of the
460 drm_intel_add_validate_buffer(drm_intel_bo *bo)
462 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
463 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
466 if (bo_gem->validate_index != -1)
469 /* Extend the array of validation entries as necessary. */
470 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
471 int new_size = bufmgr_gem->exec_size * 2;
476 bufmgr_gem->exec_objects =
477 realloc(bufmgr_gem->exec_objects,
478 sizeof(*bufmgr_gem->exec_objects) * new_size);
479 bufmgr_gem->exec_bos =
480 realloc(bufmgr_gem->exec_bos,
481 sizeof(*bufmgr_gem->exec_bos) * new_size);
482 bufmgr_gem->exec_size = new_size;
485 index = bufmgr_gem->exec_count;
486 bo_gem->validate_index = index;
487 /* Fill in array entry */
488 bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
489 bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
490 bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
491 bufmgr_gem->exec_objects[index].alignment = bo->align;
492 bufmgr_gem->exec_objects[index].offset = 0;
493 bufmgr_gem->exec_bos[index] = bo;
494 bufmgr_gem->exec_count++;
498 drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
500 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
501 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
506 flags |= EXEC_OBJECT_NEEDS_FENCE;
507 if (bo_gem->use_48b_address_range)
508 flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
510 if (bo_gem->validate_index != -1) {
511 bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |= flags;
515 /* Extend the array of validation entries as necessary. */
516 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
517 int new_size = bufmgr_gem->exec_size * 2;
522 bufmgr_gem->exec2_objects =
523 realloc(bufmgr_gem->exec2_objects,
524 sizeof(*bufmgr_gem->exec2_objects) * new_size);
525 bufmgr_gem->exec_bos =
526 realloc(bufmgr_gem->exec_bos,
527 sizeof(*bufmgr_gem->exec_bos) * new_size);
528 bufmgr_gem->exec_size = new_size;
531 index = bufmgr_gem->exec_count;
532 bo_gem->validate_index = index;
533 /* Fill in array entry */
534 bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
535 bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
536 bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
537 bufmgr_gem->exec2_objects[index].alignment = bo->align;
538 bufmgr_gem->exec2_objects[index].offset = 0;
539 bufmgr_gem->exec_bos[index] = bo;
540 bufmgr_gem->exec2_objects[index].flags = flags;
541 bufmgr_gem->exec2_objects[index].rsvd1 = 0;
542 bufmgr_gem->exec2_objects[index].rsvd2 = 0;
543 bufmgr_gem->exec_count++;
546 #define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
550 drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
551 drm_intel_bo_gem *bo_gem,
552 unsigned int alignment)
556 assert(!bo_gem->used_as_reloc_target);
558 /* The older chipsets are far less flexible in terms of tiling,
559 * and require tiled buffers to be size-aligned in the aperture.
560 * This means that in the worst possible case we will need a hole
561 * twice as large as the object in order for it to fit into the
562 * aperture. Optimal packing is for wimps.
564 size = bo_gem->bo.size;
565 if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
566 unsigned int min_size;
568 if (bufmgr_gem->has_relaxed_fencing) {
569 if (bufmgr_gem->gen == 3)
570 min_size = 1024*1024;
574 while (min_size < size)
579 /* Account for worst-case alignment. */
580 alignment = MAX2(alignment, min_size);
583 bo_gem->reloc_tree_size = size + alignment;
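/*
 * Worked example (illustrative): a 1.5 MiB tiled BO on gen3 with relaxed
 * fencing doubles min_size from 1 MiB to 2 MiB, so with no requested
 * alignment the worst-case aperture footprint recorded here is
 * 1.5 MiB + 2 MiB = 3.5 MiB.
 */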
587 drm_intel_setup_reloc_list(drm_intel_bo *bo)
589 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
590 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
591 unsigned int max_relocs = bufmgr_gem->max_relocs;
593 if (bo->size / 4 < max_relocs)
594 max_relocs = bo->size / 4;
596 bo_gem->relocs = malloc(max_relocs *
597 sizeof(struct drm_i915_gem_relocation_entry));
598 bo_gem->reloc_target_info = malloc(max_relocs *
599 sizeof(drm_intel_reloc_target));
600 if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
601 bo_gem->has_error = true;
603 free (bo_gem->relocs);
604 bo_gem->relocs = NULL;
606 free (bo_gem->reloc_target_info);
607 bo_gem->reloc_target_info = NULL;
616 drm_intel_gem_bo_busy(drm_intel_bo *bo)
618 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
619 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
620 struct drm_i915_gem_busy busy;
623 if (bo_gem->reusable && bo_gem->idle)
627 busy.handle = bo_gem->gem_handle;
629 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
631 bo_gem->idle = !busy.busy;
636 return (ret == 0 && busy.busy);
640 drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
641 drm_intel_bo_gem *bo_gem, int state)
643 struct drm_i915_gem_madvise madv;
646 madv.handle = bo_gem->gem_handle;
649 drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
651 return madv.retained;
655 drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
657 return drm_intel_gem_bo_madvise_internal
658 ((drm_intel_bufmgr_gem *) bo->bufmgr,
659 (drm_intel_bo_gem *) bo,
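/*
 * Usage sketch (hypothetical caller) of the public wrapper: mark an idle
 * buffer purgeable, and check whether the kernel kept its pages before
 * trusting the old contents again:
 *
 *   drm_intel_bo_madvise(bo, I915_MADV_DONTNEED);
 *   ...
 *   if (!drm_intel_bo_madvise(bo, I915_MADV_WILLNEED))
 *       ;  // pages were reclaimed; contents must be reinitialized
 */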
663 /* drop the oldest entries that have been purged by the kernel */
665 drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
666 struct drm_intel_gem_bo_bucket *bucket)
668 while (!DRMLISTEMPTY(&bucket->head)) {
669 drm_intel_bo_gem *bo_gem;
671 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
672 bucket->head.next, head);
673 if (drm_intel_gem_bo_madvise_internal
674 (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
677 DRMLISTDEL(&bo_gem->head);
678 drm_intel_gem_bo_free(&bo_gem->bo);
682 static drm_intel_bo *
683 drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
687 uint32_t tiling_mode,
688 unsigned long stride,
689 unsigned int alignment)
691 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
692 drm_intel_bo_gem *bo_gem;
693 unsigned int page_size = getpagesize();
695 struct drm_intel_gem_bo_bucket *bucket;
696 bool alloc_from_cache;
697 unsigned long bo_size;
698 bool for_render = false;
700 if (flags & BO_ALLOC_FOR_RENDER)
703 /* Round the allocated size up to a power of two number of pages. */
704 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
706 /* If we don't have caching at this size, don't actually round the
709 if (bucket == NULL) {
711 if (bo_size < page_size)
714 bo_size = bucket->size;
717 pthread_mutex_lock(&bufmgr_gem->lock);
718 /* Get a buffer out of the cache if available */
720 alloc_from_cache = false;
721 if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
723 /* Allocate new render-target BOs from the tail (MRU)
724 * of the list, as it will likely be hot in the GPU
725 * cache and in the aperture for us.
727 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
728 bucket->head.prev, head);
729 DRMLISTDEL(&bo_gem->head);
730 alloc_from_cache = true;
731 bo_gem->bo.align = alignment;
733 assert(alignment == 0);
734 /* For non-render-target BOs (where we're probably
735 * going to map it first thing in order to fill it
736 * with data), check if the last BO in the cache is
737 * unbusy, and only reuse in that case. Otherwise,
738 * allocating a new buffer is probably faster than
739 * waiting for the GPU to finish.
741 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
742 bucket->head.next, head);
743 if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
744 alloc_from_cache = true;
745 DRMLISTDEL(&bo_gem->head);
749 if (alloc_from_cache) {
750 if (!drm_intel_gem_bo_madvise_internal
751 (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
752 drm_intel_gem_bo_free(&bo_gem->bo);
753 drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
758 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
761 drm_intel_gem_bo_free(&bo_gem->bo);
766 pthread_mutex_unlock(&bufmgr_gem->lock);
768 if (!alloc_from_cache) {
769 struct drm_i915_gem_create create;
771 bo_gem = calloc(1, sizeof(*bo_gem));
775 bo_gem->bo.size = bo_size;
778 create.size = bo_size;
780 ret = drmIoctl(bufmgr_gem->fd,
781 DRM_IOCTL_I915_GEM_CREATE,
783 bo_gem->gem_handle = create.handle;
784 bo_gem->bo.handle = bo_gem->gem_handle;
789 bo_gem->bo.bufmgr = bufmgr;
790 bo_gem->bo.align = alignment;
792 bo_gem->tiling_mode = I915_TILING_NONE;
793 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
796 /* drm_intel_gem_bo_free calls DRMLISTDEL() for an uninitialized
797 list (vma_list), so better set the list head here */
798 DRMINITLISTHEAD(&bo_gem->name_list);
799 DRMINITLISTHEAD(&bo_gem->vma_list);
800 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
803 drm_intel_gem_bo_free(&bo_gem->bo);
809 atomic_set(&bo_gem->refcount, 1);
810 bo_gem->validate_index = -1;
811 bo_gem->reloc_tree_fences = 0;
812 bo_gem->used_as_reloc_target = false;
813 bo_gem->has_error = false;
814 bo_gem->reusable = true;
815 bo_gem->use_48b_address_range = false;
817 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, alignment);
819 DBG("bo_create: buf %d (%s) %ldb\n",
820 bo_gem->gem_handle, bo_gem->name, size);
825 static drm_intel_bo *
826 drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
829 unsigned int alignment)
831 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
837 static drm_intel_bo *
838 drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
841 unsigned int alignment)
843 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
844 I915_TILING_NONE, 0, 0);
847 static drm_intel_bo *
848 drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
849 int x, int y, int cpp, uint32_t *tiling_mode,
850 unsigned long *pitch, unsigned long flags)
852 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
853 unsigned long size, stride;
857 unsigned long aligned_y, height_alignment;
859 tiling = *tiling_mode;
861 /* If we're tiled, our allocations are in 8 or 32-row blocks,
862 * so failure to align our height means that we won't allocate
865 * If we're untiled, we still have to align to 2 rows high
866 * because the data port accesses 2x2 blocks even if the
867 * bottom row isn't to be rendered, so failure to align means
868 * we could walk off the end of the GTT and fault. This is
869 * documented on 965, and may be the case on older chipsets
870 * too so we try to be careful.
873 height_alignment = 2;
875 if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
876 height_alignment = 16;
877 else if (tiling == I915_TILING_X
878 || (IS_915(bufmgr_gem->pci_device)
879 && tiling == I915_TILING_Y))
880 height_alignment = 8;
881 else if (tiling == I915_TILING_Y)
882 height_alignment = 32;
883 aligned_y = ALIGN(y, height_alignment);
886 stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
887 size = stride * aligned_y;
888 size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
889 } while (*tiling_mode != tiling);
892 if (tiling == I915_TILING_NONE)
895 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
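/*
 * Usage sketch (hypothetical caller) of the public entry point: allocate
 * an X-tiled 1024x768 32bpp surface; *tiling_mode may be downgraded to
 * I915_TILING_NONE and *pitch is rounded as described above:
 *
 *   uint32_t tiling = I915_TILING_X;
 *   unsigned long pitch;
 *   drm_intel_bo *bo = drm_intel_bo_alloc_tiled(bufmgr, "surface",
 *                                               1024, 768, 4,
 *                                               &tiling, &pitch, 0);
 */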
899 static drm_intel_bo *
900 drm_intel_gem_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
903 uint32_t tiling_mode,
908 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
909 drm_intel_bo_gem *bo_gem;
911 struct drm_i915_gem_userptr userptr;
913 /* Tiling with userptr surfaces is not supported
914 * on all hardware, so refuse it for the time being.
916 if (tiling_mode != I915_TILING_NONE)
919 bo_gem = calloc(1, sizeof(*bo_gem));
923 bo_gem->bo.size = size;
926 userptr.user_ptr = (__u64)((unsigned long)addr);
927 userptr.user_size = size;
928 userptr.flags = flags;
930 ret = drmIoctl(bufmgr_gem->fd,
931 DRM_IOCTL_I915_GEM_USERPTR,
934 DBG("bo_create_userptr: "
935 "ioctl failed with user ptr %p size 0x%lx, "
936 "user flags 0x%lx\n", addr, size, flags);
941 bo_gem->gem_handle = userptr.handle;
942 bo_gem->bo.handle = bo_gem->gem_handle;
943 bo_gem->bo.bufmgr = bufmgr;
944 bo_gem->is_userptr = true;
945 bo_gem->bo.virtual = addr;
946 /* Save the address provided by user */
947 bo_gem->user_virtual = addr;
948 bo_gem->tiling_mode = I915_TILING_NONE;
949 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
952 DRMINITLISTHEAD(&bo_gem->name_list);
953 DRMINITLISTHEAD(&bo_gem->vma_list);
956 atomic_set(&bo_gem->refcount, 1);
957 bo_gem->validate_index = -1;
958 bo_gem->reloc_tree_fences = 0;
959 bo_gem->used_as_reloc_target = false;
960 bo_gem->has_error = false;
961 bo_gem->reusable = false;
962 bo_gem->use_48b_address_range = false;
964 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
966 DBG("bo_create_userptr: "
967 "ptr %p buf %d (%s) size %ldb, stride 0x%x, tile mode %d\n",
968 addr, bo_gem->gem_handle, bo_gem->name,
969 size, stride, tiling_mode);
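/*
 * Usage sketch (hypothetical caller) of the public wrapper: the kernel
 * expects a page-aligned pointer, and tiling is refused as noted above:
 *
 *   void *ptr;
 *   if (posix_memalign(&ptr, getpagesize(), size) == 0) {
 *       drm_intel_bo *bo = drm_intel_bo_alloc_userptr(bufmgr, "wrapped",
 *                                                     ptr, I915_TILING_NONE,
 *                                                     0, size, 0);
 *   }
 */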
975 has_userptr(drm_intel_bufmgr_gem *bufmgr_gem)
980 struct drm_i915_gem_userptr userptr;
982 pgsz = sysconf(_SC_PAGESIZE);
985 ret = posix_memalign(&ptr, pgsz, pgsz);
987 DBG("Failed to get a page (%ld) for userptr detection!\n",
993 userptr.user_ptr = (__u64)(unsigned long)ptr;
994 userptr.user_size = pgsz;
997 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
999 if (errno == ENODEV && userptr.flags == 0) {
1000 userptr.flags = I915_USERPTR_UNSYNCHRONIZED;
1007 /* We don't release the userptr bo here as we want to keep the
1008 * kernel mm tracking alive for our lifetime. The first time we
1009 * create a userptr object the kernel has to install a mmu_notifier
1010 * which is a heavyweight operation (e.g. it requires taking all
1011 * mm_locks and stop_machine()).
1014 bufmgr_gem->userptr_active.ptr = ptr;
1015 bufmgr_gem->userptr_active.handle = userptr.handle;
1020 static drm_intel_bo *
1021 check_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
1024 uint32_t tiling_mode,
1027 unsigned long flags)
1029 if (has_userptr((drm_intel_bufmgr_gem *)bufmgr))
1030 bufmgr->bo_alloc_userptr = drm_intel_gem_bo_alloc_userptr;
1032 bufmgr->bo_alloc_userptr = NULL;
1034 return drm_intel_bo_alloc_userptr(bufmgr, name, addr,
1035 tiling_mode, stride, size, flags);
1039 * Returns a drm_intel_bo wrapping the given buffer object handle.
1041 * This can be used when one application needs to pass a buffer object
1045 drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
1047 unsigned int handle)
1049 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1050 drm_intel_bo_gem *bo_gem;
1052 struct drm_gem_open open_arg;
1053 struct drm_i915_gem_get_tiling get_tiling;
1054 drmMMListHead *list;
1056 /* At the moment most applications only have a few named bo.
1057 * For instance, in a DRI client only the render buffers passed
1058 * between X and the client are named. And since X returns the
1059 * alternating names for the front/back buffer a linear search
1060 * provides a sufficiently fast match.
1062 pthread_mutex_lock(&bufmgr_gem->lock);
1063 for (list = bufmgr_gem->named.next;
1064 list != &bufmgr_gem->named;
1065 list = list->next) {
1066 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
1067 if (bo_gem->global_name == handle) {
1068 drm_intel_gem_bo_reference(&bo_gem->bo);
1069 pthread_mutex_unlock(&bufmgr_gem->lock);
1075 open_arg.name = handle;
1076 ret = drmIoctl(bufmgr_gem->fd,
1080 DBG("Couldn't reference %s handle 0x%08x: %s\n",
1081 name, handle, strerror(errno));
1082 pthread_mutex_unlock(&bufmgr_gem->lock);
1085 /* Now see if someone has used a prime handle to get this
1086 * object from the kernel before by looking through the list
1087 * again for a matching gem_handle
1089 for (list = bufmgr_gem->named.next;
1090 list != &bufmgr_gem->named;
1091 list = list->next) {
1092 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
1093 if (bo_gem->gem_handle == open_arg.handle) {
1094 drm_intel_gem_bo_reference(&bo_gem->bo);
1095 pthread_mutex_unlock(&bufmgr_gem->lock);
1100 bo_gem = calloc(1, sizeof(*bo_gem));
1102 pthread_mutex_unlock(&bufmgr_gem->lock);
1106 bo_gem->bo.size = open_arg.size;
1107 bo_gem->bo.offset = 0;
1108 bo_gem->bo.offset64 = 0;
1109 bo_gem->bo.virtual = NULL;
1110 bo_gem->bo.bufmgr = bufmgr;
1111 bo_gem->name = name;
1112 atomic_set(&bo_gem->refcount, 1);
1113 bo_gem->validate_index = -1;
1114 bo_gem->gem_handle = open_arg.handle;
1115 bo_gem->bo.handle = open_arg.handle;
1116 bo_gem->global_name = handle;
1117 bo_gem->reusable = false;
1118 bo_gem->use_48b_address_range = false;
1120 memclear(get_tiling);
1121 get_tiling.handle = bo_gem->gem_handle;
1122 ret = drmIoctl(bufmgr_gem->fd,
1123 DRM_IOCTL_I915_GEM_GET_TILING,
1126 drm_intel_gem_bo_unreference(&bo_gem->bo);
1127 pthread_mutex_unlock(&bufmgr_gem->lock);
1130 bo_gem->tiling_mode = get_tiling.tiling_mode;
1131 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
1132 /* XXX stride is unknown */
1133 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
1135 DRMINITLISTHEAD(&bo_gem->vma_list);
1136 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
1137 pthread_mutex_unlock(&bufmgr_gem->lock);
1138 DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
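/*
 * Usage sketch (hypothetical pair of processes): the exporter publishes a
 * global name with drm_intel_bo_flink(), and the importer opens it here:
 *
 *   uint32_t name;
 *   drm_intel_bo_flink(bo, &name);                              // process A
 *   ...
 *   drm_intel_bo *shared =
 *       drm_intel_bo_gem_create_from_name(bufmgr, "shared", name); // process B
 */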
1144 drm_intel_gem_bo_free(drm_intel_bo *bo)
1146 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1147 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1148 struct drm_gem_close close;
1151 DRMLISTDEL(&bo_gem->vma_list);
1152 if (bo_gem->mem_virtual) {
1153 VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
1154 drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
1155 bufmgr_gem->vma_count--;
1157 if (bo_gem->gtt_virtual) {
1158 drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
1159 bufmgr_gem->vma_count--;
1162 /* Close this object */
1164 close.handle = bo_gem->gem_handle;
1165 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
1167 DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
1168 bo_gem->gem_handle, bo_gem->name, strerror(errno));
1174 drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
1177 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1179 if (bo_gem->mem_virtual)
1180 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);
1182 if (bo_gem->gtt_virtual)
1183 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
1187 /** Frees all cached buffers significantly older than @time. */
1189 drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
1193 if (bufmgr_gem->time == time)
1196 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
1197 struct drm_intel_gem_bo_bucket *bucket =
1198 &bufmgr_gem->cache_bucket[i];
1200 while (!DRMLISTEMPTY(&bucket->head)) {
1201 drm_intel_bo_gem *bo_gem;
1203 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1204 bucket->head.next, head);
1205 if (time - bo_gem->free_time <= 1)
1208 DRMLISTDEL(&bo_gem->head);
1210 drm_intel_gem_bo_free(&bo_gem->bo);
1214 bufmgr_gem->time = time;
1217 static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
1221 DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
1222 bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);
1224 if (bufmgr_gem->vma_max < 0)
1227 /* We may need to evict a few entries in order to create new mmaps */
1228 limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
1232 while (bufmgr_gem->vma_count > limit) {
1233 drm_intel_bo_gem *bo_gem;
1235 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1236 bufmgr_gem->vma_cache.next,
1238 assert(bo_gem->map_count == 0);
1239 DRMLISTDELINIT(&bo_gem->vma_list);
1241 if (bo_gem->mem_virtual) {
1242 drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
1243 bo_gem->mem_virtual = NULL;
1244 bufmgr_gem->vma_count--;
1246 if (bo_gem->gtt_virtual) {
1247 drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
1248 bo_gem->gtt_virtual = NULL;
1249 bufmgr_gem->vma_count--;
1254 static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1255 drm_intel_bo_gem *bo_gem)
1257 bufmgr_gem->vma_open--;
1258 DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
1259 if (bo_gem->mem_virtual)
1260 bufmgr_gem->vma_count++;
1261 if (bo_gem->gtt_virtual)
1262 bufmgr_gem->vma_count++;
1263 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
1266 static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1267 drm_intel_bo_gem *bo_gem)
1269 bufmgr_gem->vma_open++;
1270 DRMLISTDEL(&bo_gem->vma_list);
1271 if (bo_gem->mem_virtual)
1272 bufmgr_gem->vma_count--;
1273 if (bo_gem->gtt_virtual)
1274 bufmgr_gem->vma_count--;
1275 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
1279 drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
1281 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1282 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1283 struct drm_intel_gem_bo_bucket *bucket;
1286 /* Unreference all the target buffers */
1287 for (i = 0; i < bo_gem->reloc_count; i++) {
1288 if (bo_gem->reloc_target_info[i].bo != bo) {
1289 drm_intel_gem_bo_unreference_locked_timed(bo_gem->
1290 reloc_target_info[i].bo,
1294 bo_gem->reloc_count = 0;
1295 bo_gem->used_as_reloc_target = false;
1297 DBG("bo_unreference final: %d (%s)\n",
1298 bo_gem->gem_handle, bo_gem->name);
1300 /* release memory associated with this object */
1301 if (bo_gem->reloc_target_info) {
1302 free(bo_gem->reloc_target_info);
1303 bo_gem->reloc_target_info = NULL;
1305 if (bo_gem->relocs) {
1306 free(bo_gem->relocs);
1307 bo_gem->relocs = NULL;
1310 /* Clear any left-over mappings */
1311 if (bo_gem->map_count) {
1312 DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
1313 bo_gem->map_count = 0;
1314 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1315 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1318 DRMLISTDEL(&bo_gem->name_list);
1320 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
1321 /* Put the buffer into our internal cache for reuse if we can. */
1322 if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
1323 drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
1324 I915_MADV_DONTNEED)) {
1325 bo_gem->free_time = time;
1327 bo_gem->name = NULL;
1328 bo_gem->validate_index = -1;
1330 DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
1332 drm_intel_gem_bo_free(bo);
1336 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
1339 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1341 assert(atomic_read(&bo_gem->refcount) > 0);
1342 if (atomic_dec_and_test(&bo_gem->refcount))
1343 drm_intel_gem_bo_unreference_final(bo, time);
1346 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
1348 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1350 assert(atomic_read(&bo_gem->refcount) > 0);
1352 if (atomic_add_unless(&bo_gem->refcount, -1, 1)) {
1353 drm_intel_bufmgr_gem *bufmgr_gem =
1354 (drm_intel_bufmgr_gem *) bo->bufmgr;
1355 struct timespec time;
1357 clock_gettime(CLOCK_MONOTONIC, &time);
1359 pthread_mutex_lock(&bufmgr_gem->lock);
1361 if (atomic_dec_and_test(&bo_gem->refcount)) {
1362 drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
1363 drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
1366 pthread_mutex_unlock(&bufmgr_gem->lock);
1370 static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
1372 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1373 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1374 struct drm_i915_gem_set_domain set_domain;
1377 if (bo_gem->is_userptr) {
1378 /* Return the same user ptr */
1379 bo->virtual = bo_gem->user_virtual;
1383 pthread_mutex_lock(&bufmgr_gem->lock);
1385 if (bo_gem->map_count++ == 0)
1386 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
1388 if (!bo_gem->mem_virtual) {
1389 struct drm_i915_gem_mmap mmap_arg;
1391 DBG("bo_map: %d (%s), map_count=%d\n",
1392 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1395 mmap_arg.handle = bo_gem->gem_handle;
1396 mmap_arg.size = bo->size;
1397 ret = drmIoctl(bufmgr_gem->fd,
1398 DRM_IOCTL_I915_GEM_MMAP,
1402 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1403 __FILE__, __LINE__, bo_gem->gem_handle,
1404 bo_gem->name, strerror(errno));
1405 if (--bo_gem->map_count == 0)
1406 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1407 pthread_mutex_unlock(&bufmgr_gem->lock);
1410 VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
1411 bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
1413 DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1414 bo_gem->mem_virtual);
1415 bo->virtual = bo_gem->mem_virtual;
1417 memclear(set_domain);
1418 set_domain.handle = bo_gem->gem_handle;
1419 set_domain.read_domains = I915_GEM_DOMAIN_CPU;
1421 set_domain.write_domain = I915_GEM_DOMAIN_CPU;
1423 set_domain.write_domain = 0;
1424 ret = drmIoctl(bufmgr_gem->fd,
1425 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1428 DBG("%s:%d: Error setting to CPU domain %d: %s\n",
1429 __FILE__, __LINE__, bo_gem->gem_handle,
1434 bo_gem->mapped_cpu_write = true;
1436 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1437 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
1438 pthread_mutex_unlock(&bufmgr_gem->lock);
1444 map_gtt(drm_intel_bo *bo)
1446 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1447 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1450 if (bo_gem->is_userptr)
1453 if (bo_gem->map_count++ == 0)
1454 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
1456 /* Get a mapping of the buffer if we haven't before. */
1457 if (bo_gem->gtt_virtual == NULL) {
1458 struct drm_i915_gem_mmap_gtt mmap_arg;
1460 DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
1461 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1464 mmap_arg.handle = bo_gem->gem_handle;
1466 /* Get the fake offset back... */
1467 ret = drmIoctl(bufmgr_gem->fd,
1468 DRM_IOCTL_I915_GEM_MMAP_GTT,
1472 DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
1474 bo_gem->gem_handle, bo_gem->name,
1476 if (--bo_gem->map_count == 0)
1477 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1482 bo_gem->gtt_virtual = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
1483 MAP_SHARED, bufmgr_gem->fd,
1485 if (bo_gem->gtt_virtual == MAP_FAILED) {
1486 bo_gem->gtt_virtual = NULL;
1488 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1490 bo_gem->gem_handle, bo_gem->name,
1492 if (--bo_gem->map_count == 0)
1493 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1498 bo->virtual = bo_gem->gtt_virtual;
1500 DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1501 bo_gem->gtt_virtual);
1507 drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
1509 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1510 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1511 struct drm_i915_gem_set_domain set_domain;
1514 pthread_mutex_lock(&bufmgr_gem->lock);
1518 pthread_mutex_unlock(&bufmgr_gem->lock);
1522 /* Now move it to the GTT domain so that the GPU and CPU
1523 * caches are flushed and the GPU isn't actively using the
1526 * The pagefault handler does this domain change for us when
1527 * it has unbound the BO from the GTT, but it's up to us to
1528 * tell it when we're about to use things if we had done
1529 * rendering and it still happens to be bound to the GTT.
1531 memclear(set_domain);
1532 set_domain.handle = bo_gem->gem_handle;
1533 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1534 set_domain.write_domain = I915_GEM_DOMAIN_GTT;
1535 ret = drmIoctl(bufmgr_gem->fd,
1536 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1539 DBG("%s:%d: Error setting domain %d: %s\n",
1540 __FILE__, __LINE__, bo_gem->gem_handle,
1544 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1545 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
1546 pthread_mutex_unlock(&bufmgr_gem->lock);
1552 * Performs a mapping of the buffer object like the normal GTT
1553 * mapping, but avoids waiting for the GPU to be done reading from or
1554 * rendering to the buffer.
1556 * This is used in the implementation of GL_ARB_map_buffer_range: The
1557 * user asks to create a buffer, then does a mapping, fills some
1558 * space, runs a drawing command, then asks to map it again without
1559 * synchronizing because it guarantees that it won't write over the
1560 * data that the GPU is busy using (or, more specifically, that if it
1561 * does write over the data, it acknowledges that rendering is
1566 drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
1568 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1569 #ifdef HAVE_VALGRIND
1570 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1574 /* If the CPU cache isn't coherent with the GTT, then use a
1575 * regular synchronized mapping. The problem is that we don't
1576 * track where the buffer was last used on the CPU side in
1577 * terms of drm_intel_bo_map vs drm_intel_gem_bo_map_gtt, so
1578 * we would potentially corrupt the buffer even when the user
1579 * does reasonable things.
1581 if (!bufmgr_gem->has_llc)
1582 return drm_intel_gem_bo_map_gtt(bo);
1584 pthread_mutex_lock(&bufmgr_gem->lock);
1588 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1589 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
1592 pthread_mutex_unlock(&bufmgr_gem->lock);
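/*
 * Usage sketch (hypothetical GL-style caller; write_offset/data/len are
 * assumed): append new data without stalling, relying on the caller's
 * guarantee not to touch ranges the GPU is still reading:
 *
 *   drm_intel_gem_bo_map_unsynchronized(bo);
 *   memcpy((char *)bo->virtual + write_offset, data, len);
 *   drm_intel_bo_unmap(bo);
 */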
1597 static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
1599 drm_intel_bufmgr_gem *bufmgr_gem;
1600 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1606 if (bo_gem->is_userptr)
1609 bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1611 pthread_mutex_lock(&bufmgr_gem->lock);
1613 if (bo_gem->map_count <= 0) {
1614 DBG("attempted to unmap an unmapped bo\n");
1615 pthread_mutex_unlock(&bufmgr_gem->lock);
1616 /* Preserve the old behaviour of just treating this as a
1617 * no-op rather than reporting the error.
1622 if (bo_gem->mapped_cpu_write) {
1623 struct drm_i915_gem_sw_finish sw_finish;
1625 /* Cause a flush to happen if the buffer's pinned for
1626 * scanout, so the results show up in a timely manner.
1627 * Unlike GTT set domains, this only does work if the
1628 * buffer should be scanout-related.
1630 memclear(sw_finish);
1631 sw_finish.handle = bo_gem->gem_handle;
1632 ret = drmIoctl(bufmgr_gem->fd,
1633 DRM_IOCTL_I915_GEM_SW_FINISH,
1635 ret = ret == -1 ? -errno : 0;
1637 bo_gem->mapped_cpu_write = false;
1640 /* We need to unmap after every invocation as we cannot track
1641 * an open vma for every bo as that will exhaust the system
1642 * limits and cause later failures.
1644 if (--bo_gem->map_count == 0) {
1645 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1646 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1649 pthread_mutex_unlock(&bufmgr_gem->lock);
1655 drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
1657 return drm_intel_gem_bo_unmap(bo);
1661 drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
1662 unsigned long size, const void *data)
1664 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1665 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1666 struct drm_i915_gem_pwrite pwrite;
1669 if (bo_gem->is_userptr)
1673 pwrite.handle = bo_gem->gem_handle;
1674 pwrite.offset = offset;
1676 pwrite.data_ptr = (uint64_t) (uintptr_t) data;
1677 ret = drmIoctl(bufmgr_gem->fd,
1678 DRM_IOCTL_I915_GEM_PWRITE,
1682 DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
1683 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1684 (int)size, strerror(errno));
1691 drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
1693 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1694 struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
1697 memclear(get_pipe_from_crtc_id);
1698 get_pipe_from_crtc_id.crtc_id = crtc_id;
1699 ret = drmIoctl(bufmgr_gem->fd,
1700 DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
1701 &get_pipe_from_crtc_id);
1703 /* We return -1 here to signal that we don't
1704 * know which pipe is associated with this crtc.
1705 * This lets the caller know that this information
1706 * isn't available; using the wrong pipe for
1707 * vblank waiting can cause the chipset to lock up
1712 return get_pipe_from_crtc_id.pipe;
1716 drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
1717 unsigned long size, void *data)
1719 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1720 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1721 struct drm_i915_gem_pread pread;
1724 if (bo_gem->is_userptr)
1728 pread.handle = bo_gem->gem_handle;
1729 pread.offset = offset;
1731 pread.data_ptr = (uint64_t) (uintptr_t) data;
1732 ret = drmIoctl(bufmgr_gem->fd,
1733 DRM_IOCTL_I915_GEM_PREAD,
1737 DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
1738 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1739 (int)size, strerror(errno));
1745 /** Waits for all GPU rendering with the object to have completed. */
1747 drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
1749 drm_intel_gem_bo_start_gtt_access(bo, 1);
1753 * Waits on a BO for the given amount of time.
1755 * @bo: buffer object to wait for
1756 * @timeout_ns: amount of time to wait in nanoseconds.
1757 * If value is less than 0, an infinite wait will occur.
1759 * Returns 0 if the wait was successful, i.e. the last batch referencing the
1760 * object has completed within the allotted time. Otherwise some negative return
1761 * value describes the error. Of particular interest is -ETIME when the wait has
1762 * failed to yield the desired result.
1764 * Similar to drm_intel_gem_bo_wait_rendering except a timeout parameter allows
1765 * the operation to give up after a certain amount of time. Another subtle
1766 * difference is in the internal locking semantics: this variant does not
1767 * hold the lock for the duration of the wait. This makes the wait subject
1768 * to a larger userspace race window.
1770 * The implementation shall wait until the object is no longer actively
1771 * referenced within a batch buffer at the time of the call. The wait will
1772 * not guarantee that the buffer is re-issued via another thread, or a flinked
1773 * handle. Userspace must make sure this race does not occur if such precision
1776 * Note that some kernels have broken the infinite wait for negative values
1777 * promise; upgrade to the latest stable kernel if this is the case.
1780 drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
1782 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1783 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1784 struct drm_i915_gem_wait wait;
1787 if (!bufmgr_gem->has_wait_timeout) {
1788 DBG("%s:%d: Timed wait is not supported. Falling back to "
1789 "infinite wait\n", __FILE__, __LINE__);
1791 drm_intel_gem_bo_wait_rendering(bo);
1794 return drm_intel_gem_bo_busy(bo) ? -ETIME : 0;
1799 wait.bo_handle = bo_gem->gem_handle;
1800 wait.timeout_ns = timeout_ns;
1801 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
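/*
 * Usage sketch (hypothetical caller): bound the wait to one second and
 * distinguish a timeout from completion:
 *
 *   int ret = drm_intel_gem_bo_wait(bo, 1000000000ll);
 *   if (ret == -ETIME)
 *       ;  // still busy after 1s; back off or flush more work
 */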
1809 * Sets the object to the GTT read and possibly write domain, used by the X
1810 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
1812 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
1813 * can do tiled pixmaps this way.
1816 drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
1818 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1819 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1820 struct drm_i915_gem_set_domain set_domain;
1823 memclear(set_domain);
1824 set_domain.handle = bo_gem->gem_handle;
1825 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1826 set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
1827 ret = drmIoctl(bufmgr_gem->fd,
1828 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1831 DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
1832 __FILE__, __LINE__, bo_gem->gem_handle,
1833 set_domain.read_domains, set_domain.write_domain,
1839 drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
1841 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1842 struct drm_gem_close close_bo;
1845 free(bufmgr_gem->exec2_objects);
1846 free(bufmgr_gem->exec_objects);
1847 free(bufmgr_gem->exec_bos);
1849 pthread_mutex_destroy(&bufmgr_gem->lock);
1851 /* Free any cached buffer objects we were going to reuse */
1852 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
1853 struct drm_intel_gem_bo_bucket *bucket =
1854 &bufmgr_gem->cache_bucket[i];
1855 drm_intel_bo_gem *bo_gem;
1857 while (!DRMLISTEMPTY(&bucket->head)) {
1858 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1859 bucket->head.next, head);
1860 DRMLISTDEL(&bo_gem->head);
1862 drm_intel_gem_bo_free(&bo_gem->bo);
1866 /* Release userptr bo kept hanging around for optimisation. */
1867 if (bufmgr_gem->userptr_active.ptr) {
1869 close_bo.handle = bufmgr_gem->userptr_active.handle;
1870 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
1871 free(bufmgr_gem->userptr_active.ptr);
1874 "Failed to release test userptr object! (%d) "
1875 "i915 kernel driver may not be sane!\n", errno);
1882 * Adds the target buffer to the validation list and adds the relocation
1883 * to the reloc_buffer's relocation list.
1885 * The relocation entry at the given offset must already contain the
1886 * precomputed relocation value, because the kernel will optimize out
1887 * the relocation entry write when the buffer hasn't moved from the
1888 * last known offset in target_bo.
1891 do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1892 drm_intel_bo *target_bo, uint32_t target_offset,
1893 uint32_t read_domains, uint32_t write_domain,
1896 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1897 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1898 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
1899 bool fenced_command;
1901 if (bo_gem->has_error)
1904 if (target_bo_gem->has_error) {
1905 bo_gem->has_error = true;
1909 /* We never use HW fences for rendering on 965+ */
1910 if (bufmgr_gem->gen >= 4)
1913 fenced_command = need_fence;
1914 if (target_bo_gem->tiling_mode == I915_TILING_NONE)
1917 /* Create a new relocation list if needed */
1918 if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
1921 /* Check overflow */
1922 assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
1925 assert(offset <= bo->size - 4);
1926 assert((write_domain & (write_domain - 1)) == 0);
1928 /* An object needing a fence is a tiled buffer, so it won't have
1929 * relocs to other buffers.
1932 assert(target_bo_gem->reloc_count == 0);
1933 target_bo_gem->reloc_tree_fences = 1;
1936 /* Make sure that we're not adding a reloc to something whose size has
1937 * already been accounted for.
1939 assert(!bo_gem->used_as_reloc_target);
1940 if (target_bo_gem != bo_gem) {
1941 target_bo_gem->used_as_reloc_target = true;
1942 bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
1943 bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
1946 bo_gem->relocs[bo_gem->reloc_count].offset = offset;
1947 bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
1948 bo_gem->relocs[bo_gem->reloc_count].target_handle =
1949 target_bo_gem->gem_handle;
1950 bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
1951 bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
1952 bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64;
1954 bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
1955 if (target_bo != bo)
1956 drm_intel_gem_bo_reference(target_bo);
1958 bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
1959 DRM_INTEL_RELOC_FENCE;
1961 bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
1963 bo_gem->reloc_count++;
1969 drm_intel_gem_bo_use_48b_address_range(drm_intel_bo *bo, uint32_t enable)
1971 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1972 bo_gem->use_48b_address_range = enable;
1976 drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1977 drm_intel_bo *target_bo, uint32_t target_offset,
1978 uint32_t read_domains, uint32_t write_domain)
1980 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
1982 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1983 read_domains, write_domain,
1984 !bufmgr_gem->fenced_relocs);
1988 drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
1989 drm_intel_bo *target_bo,
1990 uint32_t target_offset,
1991 uint32_t read_domains, uint32_t write_domain)
1993 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1994 read_domains, write_domain, true);
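/*
 * Usage sketch (hypothetical caller; batch_bo/reloc_offset are assumed):
 * pointing a batch buffer at a render target through the public wrapper;
 * the value written into the batch must match the presumed offset
 * recorded here:
 *
 *   drm_intel_bo_emit_reloc(batch_bo, reloc_offset, target_bo, 0,
 *                           I915_GEM_DOMAIN_RENDER,
 *                           I915_GEM_DOMAIN_RENDER);
 */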
1998 drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
2000 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2002 return bo_gem->reloc_count;
2006 * Removes existing relocation entries in the BO after "start".
2008 * This allows a user to avoid a two-step process for state setup with
2009 * counting up all the buffer objects and doing a
2010 * drm_intel_bufmgr_check_aperture_space() before emitting any of the
2011 * relocations for the state setup. Instead, save the state of the
2012 * batchbuffer including drm_intel_gem_get_reloc_count(), emit all the
2013 * state, and then check if it still fits in the aperture.
2015 * Any further drm_intel_bufmgr_check_aperture_space() queries
2016 * involving this buffer in the tree are undefined after this call.
2019 drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
2021 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2022 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2024 struct timespec time;
2026 clock_gettime(CLOCK_MONOTONIC, &time);
2028 assert(bo_gem->reloc_count >= start);
2030 /* Unreference the cleared target buffers */
2031 pthread_mutex_lock(&bufmgr_gem->lock);
2033 for (i = start; i < bo_gem->reloc_count; i++) {
2034 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->reloc_target_info[i].bo;
2035 if (&target_bo_gem->bo != bo) {
2036 bo_gem->reloc_tree_fences -= target_bo_gem->reloc_tree_fences;
2037 drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo,
2041 bo_gem->reloc_count = start;
2043 pthread_mutex_unlock(&bufmgr_gem->lock);
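/*
 * Usage sketch (hypothetical caller) of the pattern described above:
 *
 *   int saved = drm_intel_gem_bo_get_reloc_count(batch);
 *   ... emit state and relocations ...
 *   if (drm_intel_bufmgr_check_aperture_space(&batch, 1) != 0) {
 *       drm_intel_gem_bo_clear_relocs(batch, saved);  // roll back
 *       ... flush, then re-emit into a fresh batch ...
 *   }
 */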
2048 * Walk the tree of relocations rooted at BO and accumulate the list of
2049 * validations to be performed and update the relocation buffers with
2050 * index values into the validation list.
2053 drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
2055 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2058 if (bo_gem->relocs == NULL)
2061 for (i = 0; i < bo_gem->reloc_count; i++) {
2062 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
2064 if (target_bo == bo)
2067 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
2069 /* Continue walking the tree depth-first. */
2070 drm_intel_gem_bo_process_reloc(target_bo);
2072 /* Add the target to the validate list */
2073 drm_intel_add_validate_buffer(target_bo);
2078 drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
2080 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
2083 if (bo_gem->relocs == NULL)
2086 for (i = 0; i < bo_gem->reloc_count; i++) {
2087 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
2090 if (target_bo == bo)
2093 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
2095 /* Continue walking the tree depth-first. */
2096 drm_intel_gem_bo_process_reloc2(target_bo);
2098 need_fence = (bo_gem->reloc_target_info[i].flags &
2099 DRM_INTEL_RELOC_FENCE);
2101 /* Add the target to the validate list */
2102 drm_intel_add_validate_buffer2(target_bo, need_fence);
2108 drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
2112 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2113 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2114 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2116 /* Update the buffer offset */
2117 if (bufmgr_gem->exec_objects[i].offset != bo->offset64) {
2118 DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
2119 bo_gem->gem_handle, bo_gem->name,
2120 upper_32_bits(bo->offset64),
2121 lower_32_bits(bo->offset64),
2122 upper_32_bits(bufmgr_gem->exec_objects[i].offset),
2123 lower_32_bits(bufmgr_gem->exec_objects[i].offset));
2124 bo->offset64 = bufmgr_gem->exec_objects[i].offset;
2125 bo->offset = bufmgr_gem->exec_objects[i].offset;
2131 drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
2135 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2136 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2137 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
2139 /* Update the buffer offset */
2140 if (bufmgr_gem->exec2_objects[i].offset != bo->offset64) {
2141 DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
2142 bo_gem->gem_handle, bo_gem->name,
2143 upper_32_bits(bo->offset64),
2144 lower_32_bits(bo->offset64),
2145 upper_32_bits(bufmgr_gem->exec2_objects[i].offset),
2146 lower_32_bits(bufmgr_gem->exec2_objects[i].offset));
2147 bo->offset64 = bufmgr_gem->exec2_objects[i].offset;
2148 bo->offset = bufmgr_gem->exec2_objects[i].offset;
2154 drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
2155 int x1, int y1, int width, int height,
2156 enum aub_dump_bmp_format format,
2157 int pitch, int offset)
2162 drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
2163 drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
2165 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2166 struct drm_i915_gem_execbuffer execbuf;
2169 if (to_bo_gem(bo)->has_error)
2172 pthread_mutex_lock(&bufmgr_gem->lock);
2173 /* Update indices and set up the validate list. */
2174 drm_intel_gem_bo_process_reloc(bo);
2176 /* Add the batch buffer to the validation list. There are no
2177 * relocations pointing to it.
2179 drm_intel_add_validate_buffer(bo);
2182 execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
2183 execbuf.buffer_count = bufmgr_gem->exec_count;
2184 execbuf.batch_start_offset = 0;
2185 execbuf.batch_len = used;
2186 execbuf.cliprects_ptr = (uintptr_t) cliprects;
2187 execbuf.num_cliprects = num_cliprects;
2191 ret = drmIoctl(bufmgr_gem->fd,
2192 DRM_IOCTL_I915_GEM_EXECBUFFER,
2196 if (errno == ENOSPC) {
2197 DBG("Execbuffer fails to pin. "
2198 "Estimate: %u. Actual: %u. Available: %u\n",
2199 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
2202 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
2205 (unsigned int)bufmgr_gem->gtt_size);
2208 drm_intel_update_buffer_offsets(bufmgr_gem);
2210 if (bufmgr_gem->bufmgr.debug)
2211 drm_intel_gem_dump_validation_list(bufmgr_gem);
2213 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2214 drm_intel_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);
2216 bo_gem->idle = false;
2218 /* Disconnect the buffer from the validate list */
2219 bo_gem->validate_index = -1;
2220 bufmgr_gem->exec_bos[i] = NULL;
2222 bufmgr_gem->exec_count = 0;
2223 pthread_mutex_unlock(&bufmgr_gem->lock);
2229 do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
2230 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
2233 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
2234 struct drm_i915_gem_execbuffer2 execbuf;
2238 if (to_bo_gem(bo)->has_error)
2241 switch (flags & 0x7) {
2245 if (!bufmgr_gem->has_blt)
2249 if (!bufmgr_gem->has_bsd)
2252 case I915_EXEC_VEBOX:
2253 if (!bufmgr_gem->has_vebox)
2256 case I915_EXEC_RENDER:
2257 case I915_EXEC_DEFAULT:
2261 pthread_mutex_lock(&bufmgr_gem->lock);
2262 /* Update indices and set up the validate list. */
2263 drm_intel_gem_bo_process_reloc2(bo);
2265 /* Add the batch buffer to the validation list. There are no relocations
2268 drm_intel_add_validate_buffer2(bo, 0);
2271 execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
2272 execbuf.buffer_count = bufmgr_gem->exec_count;
2273 execbuf.batch_start_offset = 0;
2274 execbuf.batch_len = used;
2275 execbuf.cliprects_ptr = (uintptr_t)cliprects;
2276 execbuf.num_cliprects = num_cliprects;
2279 execbuf.flags = flags;
2281 i915_execbuffer2_set_context_id(execbuf, 0);
2283 i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id);
2286 if (bufmgr_gem->no_exec)
2287 goto skip_execution;
2289 ret = drmIoctl(bufmgr_gem->fd,
2290 DRM_IOCTL_I915_GEM_EXECBUFFER2,
2294 if (ret == -ENOSPC) {
2295 DBG("Execbuffer fails to pin. "
2296 "Estimate: %u. Actual: %u. Available: %u\n",
2297 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
2298 bufmgr_gem->exec_count),
2299 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
2300 bufmgr_gem->exec_count),
2301 (unsigned int) bufmgr_gem->gtt_size);
2304 drm_intel_update_buffer_offsets2(bufmgr_gem);
2307 if (bufmgr_gem->bufmgr.debug)
2308 drm_intel_gem_dump_validation_list(bufmgr_gem);
2310 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2311 drm_intel_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);
2313 bo_gem->idle = false;
2315 /* Disconnect the buffer from the validate list */
2316 bo_gem->validate_index = -1;
2317 bufmgr_gem->exec_bos[i] = NULL;
2319 bufmgr_gem->exec_count = 0;
2320 pthread_mutex_unlock(&bufmgr_gem->lock);
2326 drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
2327 drm_clip_rect_t *cliprects, int num_cliprects,
2330 return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
2335 drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
2336 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
2339 return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
2344 drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
2345 int used, unsigned int flags)
2347 return do_exec2(bo, used, ctx, NULL, 0, 0, flags);
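/*
 * Usage sketch for the exec entry points above (illustrative only;
 * "batch_bo", "ctx" and "used" are hypothetical caller state, with
 * "used" being the number of bytes written into the batch):
 *
 *     drm_intel_bo_exec(batch_bo, used, NULL, 0, 0);
 *
 *     // explicitly target the blit engine (requires has_blt)
 *     drm_intel_bo_mrb_exec(batch_bo, used, NULL, 0, 0, I915_EXEC_BLT);
 *
 *     // execute under a hardware context on the render ring
 *     drm_intel_gem_bo_context_exec(batch_bo, ctx, used, I915_EXEC_RENDER);
 */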
2351 drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
2353 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2354 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2355 struct drm_i915_gem_pin pin;
2359 pin.handle = bo_gem->gem_handle;
2360 pin.alignment = alignment;
2362 ret = drmIoctl(bufmgr_gem->fd,
2363 DRM_IOCTL_I915_GEM_PIN,
2368 bo->offset64 = pin.offset;
2369 bo->offset = pin.offset;
2374 drm_intel_gem_bo_unpin(drm_intel_bo *bo)
2376 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2377 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2378 struct drm_i915_gem_unpin unpin;
2382 unpin.handle = bo_gem->gem_handle;
2384 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
2392 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
2393 uint32_t tiling_mode,
2396 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2397 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2398 struct drm_i915_gem_set_tiling set_tiling;
2401 if (bo_gem->global_name == 0 &&
2402 tiling_mode == bo_gem->tiling_mode &&
2403 stride == bo_gem->stride)
2406 memset(&set_tiling, 0, sizeof(set_tiling));
2408 /* set_tiling is slightly broken and overwrites the
2409 * input on the error path, so we have to open code
2412 set_tiling.handle = bo_gem->gem_handle;
2413 set_tiling.tiling_mode = tiling_mode;
2414 set_tiling.stride = stride;
2416 ret = ioctl(bufmgr_gem->fd,
2417 DRM_IOCTL_I915_GEM_SET_TILING,
2419 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
2423 bo_gem->tiling_mode = set_tiling.tiling_mode;
2424 bo_gem->swizzle_mode = set_tiling.swizzle_mode;
2425 bo_gem->stride = set_tiling.stride;
2430 drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
2433 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2434 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2437 /* Tiling with userptr surfaces is not supported
2438 * on all hardware so refuse it for the time being.
2440 if (bo_gem->is_userptr)
2443 /* Linear buffers have no stride. By ensuring that we only ever use
2444 * stride 0 with linear buffers, we simplify our code.
2446 if (*tiling_mode == I915_TILING_NONE)
2449 ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
2451 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
2453 *tiling_mode = bo_gem->tiling_mode;
2458 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
2459 uint32_t * swizzle_mode)
2461 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2463 *tiling_mode = bo_gem->tiling_mode;
2464 *swizzle_mode = bo_gem->swizzle_mode;
2469 drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
2471 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
2474 drm_intel_bo_gem *bo_gem;
2475 struct drm_i915_gem_get_tiling get_tiling;
2476 drmMMListHead *list;
2478 pthread_mutex_lock(&bufmgr_gem->lock);
2479 ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
2481 DBG("create_from_prime: failed to obtain handle from fd: %s\n", strerror(errno));
2482 pthread_mutex_unlock(&bufmgr_gem->lock);
2487 * See if the kernel has already returned this buffer to us. Just as
2488 * for named buffers, we must not create two bo's pointing at the same
2491 for (list = bufmgr_gem->named.next;
2492 list != &bufmgr_gem->named;
2493 list = list->next) {
2494 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
2495 if (bo_gem->gem_handle == handle) {
2496 drm_intel_gem_bo_reference(&bo_gem->bo);
2497 pthread_mutex_unlock(&bufmgr_gem->lock);
2502 bo_gem = calloc(1, sizeof(*bo_gem));
2504 pthread_mutex_unlock(&bufmgr_gem->lock);
2507 /* Determine size of bo. The fd-to-handle ioctl really should
2508 * return the size, but it doesn't. If we have kernel 3.12 or
2509 * later, we can lseek on the prime fd to get the size. Older
2510 * kernels will just fail, in which case we fall back to the
2511 * provided size (an estimate or a guess). */
2512 ret = lseek(prime_fd, 0, SEEK_END);
2514 bo_gem->bo.size = ret;
2516 bo_gem->bo.size = size;
2518 bo_gem->bo.handle = handle;
2519 bo_gem->bo.bufmgr = bufmgr;
2521 bo_gem->gem_handle = handle;
2523 atomic_set(&bo_gem->refcount, 1);
2525 bo_gem->name = "prime";
2526 bo_gem->validate_index = -1;
2527 bo_gem->reloc_tree_fences = 0;
2528 bo_gem->used_as_reloc_target = false;
2529 bo_gem->has_error = false;
2530 bo_gem->reusable = false;
2531 bo_gem->use_48b_address_range = false;
2533 DRMINITLISTHEAD(&bo_gem->vma_list);
2534 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
2535 pthread_mutex_unlock(&bufmgr_gem->lock);
2537 memclear(get_tiling);
2538 get_tiling.handle = bo_gem->gem_handle;
2539 ret = drmIoctl(bufmgr_gem->fd,
2540 DRM_IOCTL_I915_GEM_GET_TILING,
2543 DBG("create_from_prime: failed to get tiling: %s\n", strerror(errno));
2544 drm_intel_gem_bo_unreference(&bo_gem->bo);
2547 bo_gem->tiling_mode = get_tiling.tiling_mode;
2548 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
2549 /* XXX stride is unknown */
2550 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
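/*
 * Import sketch (illustrative; "dmabuf_fd" and "size" come from the
 * exporting process): wrap a PRIME fd in a bo. Passing an accurate
 * size matters only on kernels too old for the lseek() probe above.
 *
 *     drm_intel_bo *bo =
 *         drm_intel_bo_gem_create_from_prime(bufmgr, dmabuf_fd, size);
 *     if (bo == NULL)
 *         return; // import failed
 */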
2556 drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
2558 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2559 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2561 pthread_mutex_lock(&bufmgr_gem->lock);
2562 if (DRMLISTEMPTY(&bo_gem->name_list))
2563 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
2564 pthread_mutex_unlock(&bufmgr_gem->lock);
2566 if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle,
2567 DRM_CLOEXEC, prime_fd) != 0)
2570 bo_gem->reusable = false;
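/*
 * Export sketch (illustrative): the fd can be sent to another process
 * (e.g. over a Unix socket with SCM_RIGHTS), which then imports it via
 * drm_intel_bo_gem_create_from_prime(). Reuse is disabled above because
 * the cache must not recycle storage another process may still map.
 *
 *     int prime_fd;
 *     if (drm_intel_bo_gem_export_to_prime(bo, &prime_fd) == 0) {
 *         // hand prime_fd to the consumer, then close() our copy
 *     }
 */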
2576 drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
2578 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2579 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2582 if (!bo_gem->global_name) {
2583 struct drm_gem_flink flink;
2586 flink.handle = bo_gem->gem_handle;
2588 pthread_mutex_lock(&bufmgr_gem->lock);
2590 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
2592 pthread_mutex_unlock(&bufmgr_gem->lock);
2596 bo_gem->global_name = flink.name;
2597 bo_gem->reusable = false;
2599 if (DRMLISTEMPTY(&bo_gem->name_list))
2600 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
2601 pthread_mutex_unlock(&bufmgr_gem->lock);
2604 *name = bo_gem->global_name;
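/*
 * Flink sketch (illustrative): the global name is a 32-bit handle that
 * any process with the device open can resolve, e.g. with
 * drm_intel_bo_gem_create_from_name().
 *
 *     uint32_t name;
 *     if (drm_intel_bo_flink(bo, &name) == 0) {
 *         // share "name" with the consumer out of band
 *     }
 */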
2609 * Enables unlimited caching of buffer objects for reuse.
2611 * This is potentially very memory expensive, as the cache at each bucket
2612 * size is only bounded by how many buffers of that size we've managed to have
2613 * in flight at once.
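 *
 * A typical client enables reuse immediately after creating the buffer
 * manager (illustrative sketch):
 *
 *     drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
 *     drm_intel_bufmgr_gem_enable_reuse(bufmgr);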
2616 drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
2618 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
2620 bufmgr_gem->bo_reuse = true;
2624 * Enable use of fenced reloc type.
2626 * New code should enable this to avoid unnecessary fence register
2627 * allocation. If this option is not enabled, all relocs will have a fence
2628 * register allocated.
2631 drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
2633 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2635 if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
2636 bufmgr_gem->fenced_relocs = true;
2640 * Return the additional aperture space required by the tree of buffer objects
2644 drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
2646 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2650 if (bo == NULL || bo_gem->included_in_check_aperture)
2654 bo_gem->included_in_check_aperture = true;
2656 for (i = 0; i < bo_gem->reloc_count; i++)
2658 drm_intel_gem_bo_get_aperture_space(bo_gem->
2659 reloc_target_info[i].bo);
2665 * Count the number of buffers in this list that need a fence reg
2667 * If the count is greater than the number of available regs, we'll have
2668 * to ask the caller to resubmit a batch with fewer tiled buffers.
2670 * This function over-counts if the same buffer is used multiple times.
2673 drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
2676 unsigned int total = 0;
2678 for (i = 0; i < count; i++) {
2679 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
2684 total += bo_gem->reloc_tree_fences;
2690 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
2691 * for the next drm_intel_bufmgr_check_aperture_space() call.
2694 drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
2696 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2699 if (bo == NULL || !bo_gem->included_in_check_aperture)
2702 bo_gem->included_in_check_aperture = false;
2704 for (i = 0; i < bo_gem->reloc_count; i++)
2705 drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
2706 reloc_target_info[i].bo);
2710 * Return a conservative estimate for the amount of aperture required
2711 * for a collection of buffers. This may double-count some buffers.
2714 drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
2717 unsigned int total = 0;
2719 for (i = 0; i < count; i++) {
2720 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
2722 total += bo_gem->reloc_tree_size;
2728 * Return the amount of aperture needed for a collection of buffers.
2729 * This avoids double counting any buffers, at the cost of looking
2730 * at every buffer in the set.
2733 drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
2736 unsigned int total = 0;
2738 for (i = 0; i < count; i++) {
2739 total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
2740 /* For the first buffer object in the array, we get an
2741 * accurate count back for its reloc_tree size (since nothing
2742 * had been flagged as being counted yet). We can save that
2743 * value out as a more conservative reloc_tree_size that
2744 * avoids double-counting target buffers. Since the first
2745 * buffer happens to usually be the batch buffer in our
2746 * callers, this can pull us back from doing the tree
2747 * walk on every new batch emit.
2750 drm_intel_bo_gem *bo_gem =
2751 (drm_intel_bo_gem *) bo_array[i];
2752 bo_gem->reloc_tree_size = total;
2756 for (i = 0; i < count; i++)
2757 drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
2762 * Return -1 if the batchbuffer should be flushed before attempting to
2763 * emit rendering referencing the buffers pointed to by bo_array.
2765 * This is required because if we try to emit a batchbuffer with relocations
2766 * to a tree of buffers that won't simultaneously fit in the aperture,
2767 * the rendering will return an error at a point where the software is not
2768 * prepared to recover from it.
2770 * However, we also want to emit the batchbuffer significantly before we reach
2771 * the limit, as a series of batchbuffers each of which references buffers
2772 * covering almost all of the aperture means that at each emit we end up
2773 * waiting to evict a buffer from the last rendering, and we get synchronous
2774 * performance. By emitting smaller batchbuffers, we eat some CPU overhead to
2775 * get better parallelism.
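 *
 * Caller pattern (an illustrative sketch; "flush_batch" is a
 * hypothetical function in the caller):
 *
 *     drm_intel_bo *bos[] = { batch_bo, src_bo, dst_bo };
 *     if (drm_intel_bufmgr_check_aperture_space(bos, 3) != 0) {
 *         flush_batch();
 *         // re-emit against a fresh, empty batch, which should fit
 *     }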
2778 drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
2780 drm_intel_bufmgr_gem *bufmgr_gem =
2781 (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
2782 unsigned int total = 0;
2783 unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
2786 /* Check for fence reg constraints if necessary */
2787 if (bufmgr_gem->available_fences) {
2788 total_fences = drm_intel_gem_total_fences(bo_array, count);
2789 if (total_fences > bufmgr_gem->available_fences)
2793 total = drm_intel_gem_estimate_batch_space(bo_array, count);
2795 if (total > threshold)
2796 total = drm_intel_gem_compute_batch_space(bo_array, count);
2798 if (total > threshold) {
2799 DBG("check_space: overflowed available aperture, "
2801 total / 1024, (int)bufmgr_gem->gtt_size / 1024);
2804 DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024,
2805 (int)bufmgr_gem->gtt_size / 1024);
2811 * Disable buffer reuse for objects which are shared with the kernel
2812 * as scanout buffers
2815 drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
2817 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2819 bo_gem->reusable = false;
2824 drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
2826 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2828 return bo_gem->reusable;
2832 _drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
2834 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2837 for (i = 0; i < bo_gem->reloc_count; i++) {
2838 if (bo_gem->reloc_target_info[i].bo == target_bo)
2840 if (bo == bo_gem->reloc_target_info[i].bo)
2842 if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
2850 /** Return true if target_bo is referenced by bo's relocation tree. */
2852 drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
2854 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
2856 if (bo == NULL || target_bo == NULL)
2858 if (target_bo_gem->used_as_reloc_target)
2859 return _drm_intel_gem_bo_references(bo, target_bo);
2864 add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
2866 unsigned int i = bufmgr_gem->num_buckets;
2868 assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));
2870 DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
2871 bufmgr_gem->cache_bucket[i].size = size;
2872 bufmgr_gem->num_buckets++;
2876 init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
2878 unsigned long size, cache_max_size = 64 * 1024 * 1024;
2880 /* OK, so power of two buckets was too wasteful of memory.
2881 * Give 3 other sizes between each power of two, to hopefully
2882 * cover things accurately enough. (The alternative is
2883 * probably to just go for exact matching of sizes, and assume
2884 * that for things like composited window resize the tiled
2885 * width/height alignment and rounding of sizes to pages will
2886 * get us useful cache hit rates anyway)
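 *
 * For example, around 16kB this yields buckets of 16kB, 20kB, 24kB and
 * 28kB before stepping to 32kB: roughly quarter-size granularity rather
 * than the 2x jumps of pure powers of two.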
2888 add_bucket(bufmgr_gem, 4096);
2889 add_bucket(bufmgr_gem, 4096 * 2);
2890 add_bucket(bufmgr_gem, 4096 * 3);
2892 /* Initialize the linked lists for BO reuse cache. */
2893 for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
2894 add_bucket(bufmgr_gem, size);
2896 add_bucket(bufmgr_gem, size + size * 1 / 4);
2897 add_bucket(bufmgr_gem, size + size * 2 / 4);
2898 add_bucket(bufmgr_gem, size + size * 3 / 4);
2903 drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
2905 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2907 bufmgr_gem->vma_max = limit;
2909 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
2913 * Get the PCI ID for the device. This can be overridden by setting the
2914 * INTEL_DEVID_OVERRIDE environment variable to the desired ID.
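 *
 * For example (an illustrative invocation; any valid PCI ID works):
 *
 *     $ INTEL_DEVID_OVERRIDE=0x0412 glxinfo
 *
 * Note that the override also sets no_exec, so batches are built but
 * never actually submitted to the kernel.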
2917 get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
2919 char *devid_override;
2922 drm_i915_getparam_t gp;
2924 if (geteuid() == getuid()) {
2925 devid_override = getenv("INTEL_DEVID_OVERRIDE");
2926 if (devid_override) {
2927 bufmgr_gem->no_exec = true;
2928 return strtol(devid_override, NULL, 0);
2933 gp.param = I915_PARAM_CHIPSET_ID;
2935 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2937 fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
2938 fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
2944 drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
2946 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2948 return bufmgr_gem->pci_device;
2952 * Sets the AUB filename.
2954 * This function has to be called before drm_intel_bufmgr_gem_set_aub_dump()
2955 * for it to have any effect.
2958 drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
2959 const char *filename)
2964 * Sets up AUB dumping.
2966 * This is a trace file format that can be used with the simulator.
2967 * Packets are emitted in a format somewhat like GPU command packets.
2968 * You can set up a GTT and upload your objects into the referenced
2969 * space, then send off batchbuffers and get BMPs out the other end.
2972 drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
2974 fprintf(stderr, "libdrm aub dumping is deprecated.\n\n"
2975 "Use intel_aubdump from intel-gpu-tools instead. Install intel-gpu-tools,\n"
2976 "then run (for example)\n\n"
2977 "\t$ intel_aubdump --output=trace.aub glxgears -geometry 500x500\n\n"
2978 "See the intel_aubdump man page for more details.\n");
2982 drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
2984 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2985 struct drm_i915_gem_context_create create;
2986 drm_intel_context *context = NULL;
2989 context = calloc(1, sizeof(*context));
2994 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
2996 DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
3002 context->ctx_id = create.ctx_id;
3003 context->bufmgr = bufmgr;
3009 drm_intel_gem_context_destroy(drm_intel_context *ctx)
3011 drm_intel_bufmgr_gem *bufmgr_gem;
3012 struct drm_i915_gem_context_destroy destroy;
3020 bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
3021 destroy.ctx_id = ctx->ctx_id;
3022 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY,
3025 fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
3032 drm_intel_get_reset_stats(drm_intel_context *ctx,
3033 uint32_t *reset_count,
3037 drm_intel_bufmgr_gem *bufmgr_gem;
3038 struct drm_i915_reset_stats stats;
3046 bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
3047 stats.ctx_id = ctx->ctx_id;
3048 ret = drmIoctl(bufmgr_gem->fd,
3049 DRM_IOCTL_I915_GET_RESET_STATS,
3052 if (reset_count != NULL)
3053 *reset_count = stats.reset_count;
3056 *active = stats.batch_active;
3058 if (pending != NULL)
3059 *pending = stats.batch_pending;
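/*
 * Usage sketch for drm_intel_get_reset_stats() above (illustrative):
 * a GL driver can poll its context to implement robustness queries.
 *
 *     uint32_t resets, active, pending;
 *     if (drm_intel_get_reset_stats(ctx, &resets, &active, &pending) == 0
 *         && active > 0) {
 *         // this context had a batch executing when the GPU hung
 *     }
 */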
3066 drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
3070 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3071 struct drm_i915_reg_read reg_read;
3075 reg_read.offset = offset;
3077 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_REG_READ, ®_read);
3079 *result = reg_read.val;
3084 drm_intel_get_subslice_total(int fd, unsigned int *subslice_total)
3086 drm_i915_getparam_t gp;
3090 gp.value = (int*)subslice_total;
3091 gp.param = I915_PARAM_SUBSLICE_TOTAL;
3092 ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
3100 drm_intel_get_eu_total(int fd, unsigned int *eu_total)
3102 drm_i915_getparam_t gp;
3106 gp.value = (int*)eu_total;
3107 gp.param = I915_PARAM_EU_TOTAL;
3108 ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
3116 * Annotate the given bo for use in aub dumping.
3118 * \param annotations is an array of drm_intel_aub_annotation objects
3119 * describing the type of data in various sections of the bo. Each
3120 * element of the array specifies the type and subtype of a section of
3121 * the bo, and the past-the-end offset of that section. The elements
3122 * of \c annotations must be sorted so that ending_offset is
3125 * \param count is the number of elements in the \c annotations array.
3126 * If \c count is zero, then \c annotations will not be dereferenced.
3128 * Annotations are copied into a private data structure, so the caller may
3129 * re-use the memory pointed to by \c annotations after the call
3132 * Annotations are stored for the lifetime of the bo; to reset to the
3133 * default state (no annotations), call this function with a \c count
3137 drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
3138 drm_intel_aub_annotation *annotations,
3143 static pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER;
3144 static drmMMListHead bufmgr_list = { &bufmgr_list, &bufmgr_list };
3146 static drm_intel_bufmgr_gem *
3147 drm_intel_bufmgr_gem_find(int fd)
3149 drm_intel_bufmgr_gem *bufmgr_gem;
3151 DRMLISTFOREACHENTRY(bufmgr_gem, &bufmgr_list, managers) {
3152 if (bufmgr_gem->fd == fd) {
3153 atomic_inc(&bufmgr_gem->refcount);
3162 drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
3164 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3166 if (atomic_add_unless(&bufmgr_gem->refcount, -1, 1)) {
3167 pthread_mutex_lock(&bufmgr_list_mutex);
3169 if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
3170 DRMLISTDEL(&bufmgr_gem->managers);
3171 drm_intel_bufmgr_gem_destroy(bufmgr);
3174 pthread_mutex_unlock(&bufmgr_list_mutex);
3179 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
3180 * and manage buffer objects.
3182 * \param fd File descriptor of the opened DRM device.
3185 drm_intel_bufmgr_gem_init(int fd, int batch_size)
3187 drm_intel_bufmgr_gem *bufmgr_gem;
3188 struct drm_i915_gem_get_aperture aperture;
3189 drm_i915_getparam_t gp;
3193 pthread_mutex_lock(&bufmgr_list_mutex);
3195 bufmgr_gem = drm_intel_bufmgr_gem_find(fd);
3199 bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
3200 if (bufmgr_gem == NULL)
3203 bufmgr_gem->fd = fd;
3204 atomic_set(&bufmgr_gem->refcount, 1);
3206 if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
3213 ret = drmIoctl(bufmgr_gem->fd,
3214 DRM_IOCTL_I915_GEM_GET_APERTURE,
3218 bufmgr_gem->gtt_size = aperture.aper_available_size;
3220 fprintf(stderr, "DRM_IOCTL_I915_GEM_APERTURE failed: %s\n",
3222 bufmgr_gem->gtt_size = 128 * 1024 * 1024;
3223 fprintf(stderr, "Assuming %dkB available aperture size.\n"
3224 "May lead to reduced performance or incorrect "
3226 (int)bufmgr_gem->gtt_size / 1024);
3229 bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);
3231 if (IS_GEN2(bufmgr_gem->pci_device))
3232 bufmgr_gem->gen = 2;
3233 else if (IS_GEN3(bufmgr_gem->pci_device))
3234 bufmgr_gem->gen = 3;
3235 else if (IS_GEN4(bufmgr_gem->pci_device))
3236 bufmgr_gem->gen = 4;
3237 else if (IS_GEN5(bufmgr_gem->pci_device))
3238 bufmgr_gem->gen = 5;
3239 else if (IS_GEN6(bufmgr_gem->pci_device))
3240 bufmgr_gem->gen = 6;
3241 else if (IS_GEN7(bufmgr_gem->pci_device))
3242 bufmgr_gem->gen = 7;
3243 else if (IS_GEN8(bufmgr_gem->pci_device))
3244 bufmgr_gem->gen = 8;
3245 else if (IS_GEN9(bufmgr_gem->pci_device))
3246 bufmgr_gem->gen = 9;
3253 if (IS_GEN3(bufmgr_gem->pci_device) &&
3254 bufmgr_gem->gtt_size > 256*1024*1024) {
3255 /* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
3256 * be used for tiled blits. To simplify the accounting, just
3257 * subtract the unmappable part (fixed to 256MB on all known
3258 * gen3 devices) if the kernel advertises it. */
3259 bufmgr_gem->gtt_size -= 256*1024*1024;
3265 gp.param = I915_PARAM_HAS_EXECBUF2;
3266 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3270 gp.param = I915_PARAM_HAS_BSD;
3271 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3272 bufmgr_gem->has_bsd = ret == 0;
3274 gp.param = I915_PARAM_HAS_BLT;
3275 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3276 bufmgr_gem->has_blt = ret == 0;
3278 gp.param = I915_PARAM_HAS_RELAXED_FENCING;
3279 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3280 bufmgr_gem->has_relaxed_fencing = ret == 0;
3282 bufmgr_gem->bufmgr.bo_alloc_userptr = check_bo_alloc_userptr;
3284 gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
3285 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3286 bufmgr_gem->has_wait_timeout = ret == 0;
3288 gp.param = I915_PARAM_HAS_LLC;
3289 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3291 /* Kernel does not support the HAS_LLC query; fall back to GPU
3292 * generation detection and assume that we have LLC on GEN6/7
3294 bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) ||
3295 IS_GEN7(bufmgr_gem->pci_device));
3297 bufmgr_gem->has_llc = *gp.value;
3299 gp.param = I915_PARAM_HAS_VEBOX;
3300 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3301 bufmgr_gem->has_vebox = (ret == 0) && (*gp.value > 0);
3303 if (bufmgr_gem->gen < 4) {
3304 gp.param = I915_PARAM_NUM_FENCES_AVAIL;
3305 gp.value = &bufmgr_gem->available_fences;
3306 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3308 fprintf(stderr, "get fences failed: %d [%d]\n", ret,
3310 fprintf(stderr, "param: %d, val: %d\n", gp.param,
3312 bufmgr_gem->available_fences = 0;
3314 /* XXX The kernel reports the total number of fences,
3315 * including any that may be pinned.
3317 * We presume that there will be at least one pinned
3318 * fence for the scanout buffer, but there may be more
3319 * than one scanout and the user may be manually
3320 * pinning buffers. Let's move to execbuffer2 and
3321 * thereby forget the insanity of using fences...
3323 bufmgr_gem->available_fences -= 2;
3324 if (bufmgr_gem->available_fences < 0)
3325 bufmgr_gem->available_fences = 0;
3329 if (bufmgr_gem->gen >= 8) {
3330 gp.param = I915_PARAM_HAS_ALIASING_PPGTT;
3331 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3332 if (ret == 0 && *gp.value == 3)
3333 bufmgr_gem->bufmgr.bo_use_48b_address_range = drm_intel_gem_bo_use_48b_address_range;
3336 /* Let's go with one relocation for every 2 dwords (but round down a bit
3337 * since a power of two will mean an extra page allocation for the reloc
3340 * Every 4 was too few for the blender benchmark.
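 *
 * For example, with a common 16384-byte batch there are 4096 dwords,
 * giving 4096 / 2 - 2 = 2046 relocations.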
3342 bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
3344 bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
3345 bufmgr_gem->bufmgr.bo_alloc_for_render =
3346 drm_intel_gem_bo_alloc_for_render;
3347 bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
3348 bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
3349 bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
3350 bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
3351 bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
3352 bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
3353 bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
3354 bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
3355 bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
3356 bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
3357 bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
3358 bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
3359 bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
3360 bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
3361 bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
3362 /* Use the new one if available */
3364 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
3365 bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
3367 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
3368 bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
3369 bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
3370 bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref;
3371 bufmgr_gem->bufmgr.debug = 0;
3372 bufmgr_gem->bufmgr.check_aperture_space =
3373 drm_intel_gem_check_aperture_space;
3374 bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
3375 bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
3376 bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
3377 drm_intel_gem_get_pipe_from_crtc_id;
3378 bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
3380 DRMINITLISTHEAD(&bufmgr_gem->named);
3381 init_cache_buckets(bufmgr_gem);
3383 DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
3384 bufmgr_gem->vma_max = -1; /* unlimited by default */
3386 DRMLISTADD(&bufmgr_gem->managers, &bufmgr_list);
3389 pthread_mutex_unlock(&bufmgr_list_mutex);
3391 return bufmgr_gem != NULL ? &bufmgr_gem->bufmgr : NULL;
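/*
 * Initialization sketch (illustrative; the device path and sizes are
 * examples, error handling omitted):
 *
 *     int fd = open("/dev/dri/card0", O_RDWR);
 *     drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
 *     drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 *
 *     drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "scratch", 65536, 0);
 *     ...use, map, exec...
 *     drm_intel_bo_unreference(bo);
 *     drm_intel_bufmgr_destroy(bufmgr);
 */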