/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/**
 * The Iris buffer manager.
 *
 * XXX: write better comments
 * - main interface to GEM in the kernel
 */
#include <util/u_atomic.h>
#include <sys/ioctl.h>
#include <sys/types.h>

#include "common/intel_aux_map.h"
#include "common/intel_clflush.h"
#include "dev/intel_debug.h"
#include "common/intel_gem.h"
#include "dev/intel_device_info.h"
#include "util/os_mman.h"
#include "util/u_debug.h"
#include "util/macros.h"
#include "util/hash_table.h"
#include "util/list.h"
#include "util/os_file.h"
#include "util/u_dynarray.h"

#include "iris_bufmgr.h"
#include "iris_context.h"
#include "iris_kmd_backend.h"
#include "i915/iris_bufmgr.h"
#include "xe/iris_bufmgr.h"

#include "drm-uapi/i915_drm.h"
/* VALGRIND_FREELIKE_BLOCK unfortunately does not actually undo the earlier
 * VALGRIND_MALLOCLIKE_BLOCK but instead leaves vg convinced the memory is
 * leaked. All because it does not call VG(cli_free) from its
 * VG_USERREQ__FREELIKE_BLOCK handler. Instead of treating the memory like
 * an allocation, we mark it available for use upon mmapping and remove
 * it upon munmapping.
 */
#define VG_DEFINED(ptr, size) VG(VALGRIND_MAKE_MEM_DEFINED(ptr, size))
#define VG_NOACCESS(ptr, size) VG(VALGRIND_MAKE_MEM_NOACCESS(ptr, size))
/* On FreeBSD PAGE_SIZE is already defined in
 * /usr/include/machine/param.h that is indirectly
 * included in this header file.
 */
#ifndef PAGE_SIZE
#define PAGE_SIZE 4096
#endif
#define WARN_ONCE(cond, fmt...) do {                            \
   if (unlikely(cond)) {                                        \
      static bool _warned = false;                              \
      if (!_warned) {                                           \
         fprintf(stderr, "WARNING: ");                          \
         fprintf(stderr, fmt);                                  \
         _warned = true;                                        \
      }                                                         \
   }                                                            \
} while (0)
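/* Typical usage (an illustrative call, not an actual call site in this
 * file): flag an unexpected failure exactly once per process, e.g.
 *
 *    WARN_ONCE(ret < 0, "ioctl failed: %s\n", strerror(errno));
 */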
#define FILE_DEBUG_FLAG DEBUG_BUFMGR
/**
 * For debugging purposes, this returns a time in seconds.
 */
static double
get_time(void)
{
   struct timespec tp;

   clock_gettime(CLOCK_MONOTONIC, &tp);

   return tp.tv_sec + tp.tv_nsec / 1000000000.0;
}
static inline int
atomic_add_unless(int *v, int add, int unless)
{
   int c, old;
   c = p_atomic_read(v);
   while (c != unless && (old = p_atomic_cmpxchg(v, c, c + add)) != c)
      c = old;
   return c == unless;
}
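/* Note the return convention: the result is true only when *v already
 * equaled `unless`, in which case the value is left untouched.
 * iris_bo_unreference() below relies on this, calling
 * atomic_add_unless(&bo->refcount, -1, 1) so that dropping the final
 * reference is not done here, but later under the bufmgr lock.
 */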
static const char *
memzone_name(enum iris_memory_zone memzone)
{
   const char *names[] = {
      [IRIS_MEMZONE_SHADER]   = "shader",
      [IRIS_MEMZONE_BINDER]   = "binder",
      [IRIS_MEMZONE_SCRATCH]  = "scratchsurf",
      [IRIS_MEMZONE_SURFACE]  = "surface",
      [IRIS_MEMZONE_DYNAMIC]  = "dynamic",
      [IRIS_MEMZONE_OTHER]    = "other",
      [IRIS_MEMZONE_BORDER_COLOR_POOL] = "bordercolor",
   };
   assert(memzone < ARRAY_SIZE(names));
   return names[memzone];
}
struct bo_cache_bucket {
   /** List of cached BOs. */
   struct list_head head;

   /** Size of this bucket, in bytes. */
   uint64_t size;
};

struct bo_export {
   /** File descriptor associated with a handle export. */
   int drm_fd;

   /** GEM handle in drm_fd */
   uint32_t gem_handle;

   struct list_head link;
};
struct iris_memregion {
   struct intel_memory_class_instance *region;
   uint64_t size;
};
#define NUM_SLAB_ALLOCATORS 3
struct iris_slab {
   struct pb_slab base;

   unsigned entry_size;

   /** The BO representing the entire slab */
   struct iris_bo *bo;

   /** Array of iris_bo structs representing BOs allocated out of this slab */
   struct iris_bo *entries;
};
#define BUCKET_ARRAY_SIZE (14 * 4)
struct iris_bufmgr {
   /**
    * Link in the global list of buffer managers (global_bufmgr_list).
    */
   struct list_head link;

   uint32_t refcount;
   int fd;

   simple_mtx_t lock;
   simple_mtx_t bo_deps_lock;

   /** Array of lists of cached gem objects of power-of-two sizes */
   struct bo_cache_bucket cache_bucket[BUCKET_ARRAY_SIZE];
   int num_buckets;

   /** Same as cache_bucket, but for local memory gem objects */
   struct bo_cache_bucket local_cache_bucket[BUCKET_ARRAY_SIZE];
   int num_local_buckets;

   /** Same as cache_bucket, but for local-preferred memory gem objects */
   struct bo_cache_bucket local_preferred_cache_bucket[BUCKET_ARRAY_SIZE];
   int num_local_preferred_buckets;

   time_t time;

   struct hash_table *name_table;
   struct hash_table *handle_table;

   /**
    * List of BOs which we've effectively freed, but are hanging on to
    * until they're idle before closing and returning the VMA.
    */
   struct list_head zombie_list;

   struct util_vma_heap vma_allocator[IRIS_MEMZONE_COUNT];

   struct iris_memregion vram, sys;

   /* Used only when use_global_vm is true. */
   uint32_t global_vm_id;

   int next_screen_id;

   struct intel_device_info devinfo;
   const struct iris_kmd_backend *kmd_backend;

   bool bo_reuse:1;
   bool use_global_vm:1;

   struct intel_aux_map_context *aux_map_ctx;

   struct pb_slabs bo_slabs[NUM_SLAB_ALLOCATORS];

   struct iris_border_color_pool border_color_pool;
};
static simple_mtx_t global_bufmgr_list_mutex = SIMPLE_MTX_INITIALIZER;
static struct list_head global_bufmgr_list = {
   .next = &global_bufmgr_list,
   .prev = &global_bufmgr_list,
};
static void bo_free(struct iris_bo *bo);
static struct iris_bo *
find_and_ref_external_bo(struct hash_table *ht, unsigned int key)
{
   struct hash_entry *entry = _mesa_hash_table_search(ht, &key);
   struct iris_bo *bo = entry ? entry->data : NULL;

   if (bo) {
      assert(iris_bo_is_external(bo));
      assert(iris_bo_is_real(bo));
      assert(!bo->real.reusable);

      /* Being non-reusable, the BO cannot be in the cache lists, but it
       * may be in the zombie list if it had reached zero references, but
       * we hadn't yet closed it...and then reimported the same BO. If it
       * is, then remove it since it's now been resurrected.
       */
      if (list_is_linked(&bo->head))
         list_del(&bo->head);

      iris_bo_reference(bo);
   }

   return bo;
}
static void
bucket_info_for_heap(struct iris_bufmgr *bufmgr, enum iris_heap heap,
                     struct bo_cache_bucket **cache_bucket, int **num_buckets)
{
   switch (heap) {
   case IRIS_HEAP_SYSTEM_MEMORY:
      *cache_bucket = bufmgr->cache_bucket;
      *num_buckets = &bufmgr->num_buckets;
      break;
   case IRIS_HEAP_DEVICE_LOCAL:
      *cache_bucket = bufmgr->local_cache_bucket;
      *num_buckets = &bufmgr->num_local_buckets;
      break;
   case IRIS_HEAP_DEVICE_LOCAL_PREFERRED:
      *cache_bucket = bufmgr->local_preferred_cache_bucket;
      *num_buckets = &bufmgr->num_local_preferred_buckets;
      break;
   default:
      *cache_bucket = NULL;
      *num_buckets = NULL;
      unreachable("invalid heap");
   }

   assert(**num_buckets < BUCKET_ARRAY_SIZE);
}
/**
 * This function finds the correct bucket fit for the input size.
 * It works in O(1), computing the bucket index directly from the
 * requested size rather than searching through all the buckets.
 */
static struct bo_cache_bucket *
bucket_for_size(struct iris_bufmgr *bufmgr, uint64_t size,
                enum iris_heap heap, unsigned flags)
{
   /* Protected BOs need special handling during allocation.
    * Exported and scanout BOs also need special handling during allocation
    * in Xe KMD.
    */
   if ((flags & BO_ALLOC_PROTECTED) ||
       ((flags & (BO_ALLOC_SHARED | BO_ALLOC_SCANOUT)) &&
        bufmgr->devinfo.kmd_type == INTEL_KMD_TYPE_XE))
      return NULL;

   /* Calculating the pages and rounding up to the page size. */
   const unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;

   /* Row  Bucket sizes    clz((x-1) | 3)   Row    Column
    *      in pages                        stride   size
    *   0:  1  2  3  4  -> 30 30 30 30       4       1
    *   1:  5  6  7  8  -> 29 29 29 29       4       1
    *   2: 10 12 14 16  -> 28 28 28 28       8       2
    *   3: 20 24 28 32  -> 27 27 27 27      16       4
    */
   const unsigned row = 30 - __builtin_clz((pages - 1) | 3);
   const unsigned row_max_pages = 4 << row;

   /* The '& ~2' is the special case for row 1. In row 1, max pages /
    * 2 is 2, but the previous row maximum is zero (because there is
    * no previous row). All row maximum sizes are power of 2, so that
    * is the only case where that bit will be set.
    */
   const unsigned prev_row_max_pages = (row_max_pages / 2) & ~2;
   int col_size_log2 = row - 1;
   col_size_log2 += (col_size_log2 < 0);

   const unsigned col = (pages - prev_row_max_pages +
                        ((1 << col_size_log2) - 1)) >> col_size_log2;

   /* Calculating the index based on the row and column. */
   const unsigned index = (row * 4) + (col - 1);

   int *num_buckets;
   struct bo_cache_bucket *buckets;
   bucket_info_for_heap(bufmgr, heap, &buckets, &num_buckets);

   return (index < *num_buckets) ? &buckets[index] : NULL;
}
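/* A worked example of the arithmetic above: a 20 KiB request gives
 * pages = 5, so (pages - 1) | 3 = 7 and row = 30 - clz(7) = 1. Then
 * prev_row_max_pages = (8 / 2) & ~2 = 4, col_size_log2 = 0,
 * col = 5 - 4 = 1, and index = 1 * 4 + (1 - 1) = 4: the fifth bucket,
 * which init_cache_buckets() below sizes at exactly 5 pages.
 */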
enum iris_memory_zone
iris_memzone_for_address(uint64_t address)
{
   STATIC_ASSERT(IRIS_MEMZONE_OTHER_START > IRIS_MEMZONE_DYNAMIC_START);
   STATIC_ASSERT(IRIS_MEMZONE_SURFACE_START > IRIS_MEMZONE_SCRATCH_START);
   STATIC_ASSERT(IRIS_MEMZONE_SCRATCH_START == IRIS_MEMZONE_BINDER_START);
   STATIC_ASSERT(IRIS_MEMZONE_BINDER_START > IRIS_MEMZONE_SHADER_START);
   STATIC_ASSERT(IRIS_MEMZONE_DYNAMIC_START > IRIS_MEMZONE_SURFACE_START);
   STATIC_ASSERT(IRIS_BORDER_COLOR_POOL_ADDRESS == IRIS_MEMZONE_DYNAMIC_START);

   if (address >= IRIS_MEMZONE_OTHER_START)
      return IRIS_MEMZONE_OTHER;

   if (address == IRIS_BORDER_COLOR_POOL_ADDRESS)
      return IRIS_MEMZONE_BORDER_COLOR_POOL;

   if (address > IRIS_MEMZONE_DYNAMIC_START)
      return IRIS_MEMZONE_DYNAMIC;

   if (address >= IRIS_MEMZONE_SURFACE_START)
      return IRIS_MEMZONE_SURFACE;

   if (address >= (IRIS_MEMZONE_BINDER_START + IRIS_SCRATCH_ZONE_SIZE))
      return IRIS_MEMZONE_BINDER;

   if (address >= IRIS_MEMZONE_SCRATCH_START)
      return IRIS_MEMZONE_SCRATCH;

   return IRIS_MEMZONE_SHADER;
}
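/* Per the STATIC_ASSERTs above, the zones sit in ascending address order:
 * shader, then scratch and binder (which share a start address, with the
 * first IRIS_SCRATCH_ZONE_SIZE bytes belonging to scratch), then surface,
 * dynamic (whose very first address is the border color pool), and finally
 * "other", which is why the chain of range checks can walk top-down.
 */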
/**
 * Allocate a section of virtual memory for a buffer, assigning an address.
 *
 * This uses either the bucket allocator for the given size, or the large
 * object allocator (util_vma).
 */
static uint64_t
vma_alloc(struct iris_bufmgr *bufmgr,
          enum iris_memory_zone memzone,
          uint64_t size,
          uint64_t alignment)
{
   simple_mtx_assert_locked(&bufmgr->lock);

   /* Force minimum alignment based on device requirements */
   assert((alignment & (alignment - 1)) == 0);
   alignment = MAX2(alignment, bufmgr->devinfo.mem_alignment);

   if (memzone == IRIS_MEMZONE_BORDER_COLOR_POOL)
      return IRIS_BORDER_COLOR_POOL_ADDRESS;

   uint64_t addr =
      util_vma_heap_alloc(&bufmgr->vma_allocator[memzone], size, alignment);

   assert((addr >> 48ull) == 0);
   assert((addr % alignment) == 0);

   return intel_canonical_address(addr);
}
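/* intel_canonical_address() sign-extends bit 47 into the upper bits, the
 * canonical form the hardware expects for 48-bit virtual addresses. For
 * example, an allocator result of 0x0000800000001000 (bit 47 set) becomes
 * 0xffff800000001000, while anything below 1ull << 47 passes through
 * unchanged.
 */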
static void
vma_free(struct iris_bufmgr *bufmgr,
         uint64_t address,
         uint64_t size)
{
   simple_mtx_assert_locked(&bufmgr->lock);

   if (address == IRIS_BORDER_COLOR_POOL_ADDRESS)
      return;

   /* Un-canonicalize the address. */
   address = intel_48b_address(address);

   if (address == 0ull)
      return;

   enum iris_memory_zone memzone = iris_memzone_for_address(address);

   assert(memzone < ARRAY_SIZE(bufmgr->vma_allocator));

   util_vma_heap_free(&bufmgr->vma_allocator[memzone], address, size);
}
/* A timeout of 0 just checks for busyness. */
static int
iris_bo_wait_syncobj(struct iris_bo *bo, int64_t timeout_ns)
{
   int ret = 0;
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   /* If we know it's idle, don't bother with the kernel round trip */
   if (bo->idle)
      return 0;

   simple_mtx_lock(&bufmgr->bo_deps_lock);

   uint32_t handles[bo->deps_size * IRIS_BATCH_COUNT * 2];
   int handle_count = 0;

   for (int d = 0; d < bo->deps_size; d++) {
      for (int b = 0; b < IRIS_BATCH_COUNT; b++) {
         struct iris_syncobj *r = bo->deps[d].read_syncobjs[b];
         struct iris_syncobj *w = bo->deps[d].write_syncobjs[b];
         if (r)
            handles[handle_count++] = r->handle;
         if (w)
            handles[handle_count++] = w->handle;
      }
   }

   if (handle_count == 0)
      goto out;

   /* Unlike the gem wait, negative values are not infinite here. */
   int64_t timeout_abs = os_time_get_absolute_timeout(timeout_ns);
   if (timeout_abs < 0)
      timeout_abs = INT64_MAX;

   struct drm_syncobj_wait args = {
      .handles = (uintptr_t) handles,
      .timeout_nsec = timeout_abs,
      .count_handles = handle_count,
      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL,
   };

   ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
   if (ret != 0) {
      ret = -errno;
      goto out;
   }

   /* We just waited on everything, so clean up all the deps. */
   for (int d = 0; d < bo->deps_size; d++) {
      for (int b = 0; b < IRIS_BATCH_COUNT; b++) {
         iris_syncobj_reference(bufmgr, &bo->deps[d].write_syncobjs[b], NULL);
         iris_syncobj_reference(bufmgr, &bo->deps[d].read_syncobjs[b], NULL);
      }
   }

out:
   simple_mtx_unlock(&bufmgr->bo_deps_lock);
   return ret;
}
static bool
iris_bo_busy_syncobj(struct iris_bo *bo)
{
   return iris_bo_wait_syncobj(bo, 0) == -ETIME;
}
bool
iris_bo_busy(struct iris_bo *bo)
{
   bool busy;

   switch (iris_bufmgr_get_device_info(bo->bufmgr)->kmd_type) {
   case INTEL_KMD_TYPE_I915:
      if (iris_bo_is_external(bo))
         busy = iris_i915_bo_busy_gem(bo);
      else
         busy = iris_bo_busy_syncobj(bo);
      break;
   case INTEL_KMD_TYPE_XE:
      busy = iris_bo_busy_syncobj(bo);
      break;
   default:
      unreachable("missing");
      busy = true;
   }

   bo->idle = !busy;

   return busy;
}
/**
 * Specify the volatility of the buffer.
 * \param bo Buffer whose volatility is being set
 * \param state The purgeable status
 *
 * Use IRIS_MADVICE_DONT_NEED to mark the buffer as purgeable, and it will be
 * reclaimed under memory pressure. If you subsequently require the buffer,
 * then you must pass IRIS_MADVICE_WILL_NEED to mark the buffer as required.
 *
 * Returns true if the buffer was retained, or false if it was discarded
 * whilst marked as IRIS_MADVICE_DONT_NEED.
 */
static bool
iris_bo_madvise(struct iris_bo *bo, enum iris_madvice state)
{
   /* We can't madvise suballocated BOs. */
   assert(iris_bo_is_real(bo));

   return bo->bufmgr->kmd_backend->bo_madvise(bo, state);
}
static struct iris_bo *
bo_calloc(void)
{
   struct iris_bo *bo = calloc(1, sizeof(*bo));
   if (!bo)
      return NULL;

   list_inithead(&bo->real.exports);

   bo->hash = _mesa_hash_pointer(bo);

   return bo;
}
static void
bo_unmap(struct iris_bo *bo)
{
   assert(iris_bo_is_real(bo));

   VG_NOACCESS(bo->real.map, bo->size);
   os_munmap(bo->real.map, bo->size);
   bo->real.map = NULL;
}
static struct pb_slabs *
get_slabs(struct iris_bufmgr *bufmgr, uint64_t size)
{
   for (unsigned i = 0; i < NUM_SLAB_ALLOCATORS; i++) {
      struct pb_slabs *slabs = &bufmgr->bo_slabs[i];

      if (size <= 1ull << (slabs->min_order + slabs->num_orders - 1))
         return slabs;
   }

   unreachable("should have found a valid slab for this size");
}
/* Return the power of two size of a slab entry matching the input size. */
static unsigned
get_slab_pot_entry_size(struct iris_bufmgr *bufmgr, unsigned size)
{
   unsigned entry_size = util_next_power_of_two(size);
   unsigned min_entry_size = 1 << bufmgr->bo_slabs[0].min_order;

   return MAX2(entry_size, min_entry_size);
}
/* Return the slab entry alignment. */
static unsigned
get_slab_entry_alignment(struct iris_bufmgr *bufmgr, unsigned size)
{
   unsigned entry_size = get_slab_pot_entry_size(bufmgr, size);

   if (size <= entry_size * 3 / 4)
      return entry_size / 4;

   return entry_size;
}
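/* For example, a 96 KiB request has a power-of-two entry size of 128 KiB;
 * since 96 KiB <= 3/4 * 128 KiB, it can land in a 3/4-sized slab entry, and
 * only entry_size / 4 = 32 KiB of alignment is guaranteed. Exact
 * power-of-two sizes keep the full entry_size alignment.
 */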
static bool
iris_can_reclaim_slab(void *priv, struct pb_slab_entry *entry)
{
   struct iris_bo *bo = container_of(entry, struct iris_bo, slab.entry);

   return !iris_bo_busy(bo);
}
static void
iris_slab_free(void *priv, struct pb_slab *pslab)
{
   struct iris_bufmgr *bufmgr = priv;
   struct iris_slab *slab = (void *) pslab;
   struct intel_aux_map_context *aux_map_ctx = bufmgr->aux_map_ctx;

   assert(!slab->bo->aux_map_address);

   /* Since we're freeing the whole slab, all buffers allocated out of it
    * must be reclaimable. We require buffers to be idle to be reclaimed
    * (see iris_can_reclaim_slab()), so we know all entries must be idle.
    * Therefore, we can safely unmap their aux table entries.
    */
   for (unsigned i = 0; i < pslab->num_entries; i++) {
      struct iris_bo *bo = &slab->entries[i];
      if (aux_map_ctx && bo->aux_map_address) {
         intel_aux_map_unmap_range(aux_map_ctx, bo->address, bo->size);
         bo->aux_map_address = 0;
      }

      /* Unref read/write dependency syncobjs and free the array. */
      for (int d = 0; d < bo->deps_size; d++) {
         for (int b = 0; b < IRIS_BATCH_COUNT; b++) {
            iris_syncobj_reference(bufmgr, &bo->deps[d].write_syncobjs[b], NULL);
            iris_syncobj_reference(bufmgr, &bo->deps[d].read_syncobjs[b], NULL);
         }
      }
      free(bo->deps);
   }

   iris_bo_unreference(slab->bo);

   free(slab->entries);
   free(slab);
}
static struct pb_slab *
iris_slab_alloc(void *priv,
                unsigned heap,
                unsigned entry_size,
                unsigned group_index)
{
   struct iris_bufmgr *bufmgr = priv;
   struct iris_slab *slab = calloc(1, sizeof(struct iris_slab));
   unsigned flags;
   unsigned slab_size = 0;
   /* We only support slab allocation for IRIS_MEMZONE_OTHER */
   enum iris_memory_zone memzone = IRIS_MEMZONE_OTHER;

   if (!slab)
      return NULL;

   struct pb_slabs *slabs = bufmgr->bo_slabs;

   /* Determine the slab buffer size. */
   for (unsigned i = 0; i < NUM_SLAB_ALLOCATORS; i++) {
      unsigned max_entry_size =
         1 << (slabs[i].min_order + slabs[i].num_orders - 1);

      if (entry_size <= max_entry_size) {
         /* The slab size is twice the size of the largest possible entry. */
         slab_size = max_entry_size * 2;

         if (!util_is_power_of_two_nonzero(entry_size)) {
            assert(util_is_power_of_two_nonzero(entry_size * 4 / 3));

            /* If the entry size is 3/4 of a power of two, we would waste
             * space and not gain anything if we allocated only twice the
             * power of two for the backing buffer:
             *
             *    2 * 3/4 = 1.5 usable with buffer size 2
             *
             * Allocating 5 times the entry size leads us to the next power
             * of two and results in a much better memory utilization:
             *
             *    5 * 3/4 = 3.75 usable with buffer size 4
             */
            if (entry_size * 5 > slab_size)
               slab_size = util_next_power_of_two(entry_size * 5);
         }

         /* The largest slab should have the same size as the PTE fragment
          * size to get faster address translation.
          *
          * TODO: move this to intel_device_info?
          */
         const unsigned pte_size = 2 * 1024 * 1024;

         if (i == NUM_SLAB_ALLOCATORS - 1 && slab_size < pte_size)
            slab_size = pte_size;

         break;
      }
   }
   assert(slab_size != 0);

   if (heap == IRIS_HEAP_SYSTEM_MEMORY)
      flags = BO_ALLOC_SMEM;
   else if (heap == IRIS_HEAP_DEVICE_LOCAL)
      flags = BO_ALLOC_LMEM;
   else
      flags = BO_ALLOC_PLAIN;

   slab->bo =
      iris_bo_alloc(bufmgr, "slab", slab_size, slab_size, memzone, flags);
   if (!slab->bo)
      goto fail;

   slab_size = slab->bo->size;

   slab->base.num_entries = slab_size / entry_size;
   slab->base.num_free = slab->base.num_entries;
   slab->entry_size = entry_size;
   slab->entries = calloc(slab->base.num_entries, sizeof(*slab->entries));
   if (!slab->entries)
      goto fail;

   list_inithead(&slab->base.free);

   for (unsigned i = 0; i < slab->base.num_entries; i++) {
      struct iris_bo *bo = &slab->entries[i];

      bo->size = entry_size;
      bo->bufmgr = bufmgr;
      bo->hash = _mesa_hash_pointer(bo);
      bo->gem_handle = 0;
      bo->address = slab->bo->address + i * entry_size;
      bo->aux_map_address = 0;
      bo->idle = true;

      bo->slab.entry.slab = &slab->base;
      bo->slab.entry.group_index = group_index;
      bo->slab.entry.entry_size = entry_size;

      bo->slab.real = iris_get_backing_bo(slab->bo);

      list_addtail(&bo->slab.entry.head, &slab->base.free);
   }

   return &slab->base;

fail:
   iris_bo_unreference(slab->bo);
   free(slab->entries);
   free(slab);

   return NULL;
}
static enum iris_heap
flags_to_heap(struct iris_bufmgr *bufmgr, unsigned flags)
{
   if (bufmgr->vram.size > 0 &&
       !(flags & BO_ALLOC_SMEM) &&
       !(flags & BO_ALLOC_COHERENT)) {
      return flags & BO_ALLOC_LMEM ? IRIS_HEAP_DEVICE_LOCAL :
                                     IRIS_HEAP_DEVICE_LOCAL_PREFERRED;
   } else {
      assert(!(flags & BO_ALLOC_LMEM));
      return IRIS_HEAP_SYSTEM_MEMORY;
   }
}
static bool
zero_bo(struct iris_bufmgr *bufmgr,
        unsigned flags,
        struct iris_bo *bo)
{
   assert(flags & BO_ALLOC_ZEROED);

   if (bufmgr->devinfo.has_flat_ccs && (flags & BO_ALLOC_LMEM)) {
      /* With flat CCS, all allocations in LMEM have memory ranges with
       * corresponding CCS elements. These elements are only accessible
       * through GPU commands, but we don't issue GPU commands here.
       */
      return false;
   }

   void *map = iris_bo_map(NULL, bo, MAP_WRITE | MAP_RAW);
   if (!map)
      return false;

   memset(map, 0, bo->size);
   return true;
}
static struct iris_bo *
alloc_bo_from_slabs(struct iris_bufmgr *bufmgr,
                    const char *name,
                    uint64_t size,
                    uint32_t alignment,
                    unsigned flags)
{
   if (flags & BO_ALLOC_NO_SUBALLOC)
      return NULL;

   struct pb_slabs *last_slab = &bufmgr->bo_slabs[NUM_SLAB_ALLOCATORS - 1];
   unsigned max_slab_entry_size =
      1 << (last_slab->min_order + last_slab->num_orders - 1);

   if (size > max_slab_entry_size)
      return NULL;

   struct pb_slab_entry *entry;

   enum iris_heap heap = flags_to_heap(bufmgr, flags);

   unsigned alloc_size = size;

   /* Always use slabs for sizes less than 4 KB because the kernel aligns
    * everything to 4 KB.
    */
   if (size < alignment && alignment <= 4 * 1024)
      alloc_size = alignment;

   if (alignment > get_slab_entry_alignment(bufmgr, alloc_size)) {
      /* 3/4 allocations can return too small alignment.
       * Try again with a power of two allocation size.
       */
      unsigned pot_size = get_slab_pot_entry_size(bufmgr, alloc_size);

      if (alignment <= pot_size) {
         /* This size works but wastes some memory to fulfill the alignment. */
         alloc_size = pot_size;
      } else {
         /* can't fulfill alignment requirements */
         return NULL;
      }
   }

   struct pb_slabs *slabs = get_slabs(bufmgr, alloc_size);
   entry = pb_slab_alloc(slabs, alloc_size, heap);
   if (!entry) {
      /* Clean up and try again... */
      pb_slabs_reclaim(slabs);

      entry = pb_slab_alloc(slabs, alloc_size, heap);
   }
   if (!entry)
      return NULL;

   struct iris_bo *bo = container_of(entry, struct iris_bo, slab.entry);

   if (bo->aux_map_address && bo->bufmgr->aux_map_ctx) {
      /* This buffer was associated with an aux-buffer range. We only allow
       * slab allocated buffers to be reclaimed when idle (not in use by an
       * executing batch). (See iris_can_reclaim_slab().) So we know that
       * our previous aux mapping is no longer in use, and we can safely
       * remove it.
       */
      intel_aux_map_unmap_range(bo->bufmgr->aux_map_ctx, bo->address,
                                bo->size);
      bo->aux_map_address = 0;
   }

   p_atomic_set(&bo->refcount, 1);
   bo->name = name;
   bo->size = size;

   /* Zero the contents if necessary. If this fails, fall back to
    * allocating a fresh BO, which will always be zeroed by the kernel.
    */
   if ((flags & BO_ALLOC_ZEROED) && !zero_bo(bufmgr, flags, bo)) {
      pb_slab_free(slabs, &bo->slab.entry);
      return NULL;
   }

   return bo;
}
static struct iris_bo *
alloc_bo_from_cache(struct iris_bufmgr *bufmgr,
                    struct bo_cache_bucket *bucket,
                    uint32_t alignment,
                    enum iris_memory_zone memzone,
                    enum iris_mmap_mode mmap_mode,
                    unsigned flags,
                    bool match_zone)
{
   if (!bucket)
      return NULL;

   struct iris_bo *bo = NULL;

   simple_mtx_assert_locked(&bufmgr->lock);

   list_for_each_entry_safe(struct iris_bo, cur, &bucket->head, head) {
      assert(iris_bo_is_real(cur));

      /* Find one that's got the right mapping type. We used to swap maps
       * around but the kernel doesn't allow this on discrete GPUs.
       */
      if (mmap_mode != cur->real.mmap_mode)
         continue;

      /* Try a little harder to find one that's already in the right memzone */
      if (match_zone && memzone != iris_memzone_for_address(cur->address))
         continue;

      /* If the last BO in the cache is busy, there are no idle BOs. Bail,
       * either falling back to a non-matching memzone, or if that fails,
       * allocating a fresh buffer.
       */
      if (iris_bo_busy(cur))
         return NULL;

      list_del(&cur->head);

      /* Tell the kernel we need this BO and check if it still exists */
      if (!iris_bo_madvise(cur, IRIS_MADVICE_WILL_NEED)) {
         /* This BO was purged, throw it out and keep looking. */
         bo_free(cur);
         continue;
      }

      if (cur->aux_map_address) {
         /* This buffer was associated with an aux-buffer range. We make sure
          * that buffers are not reused from the cache while the buffer is (busy)
          * being used by an executing batch. Since we are here, the buffer is no
          * longer being used by a batch and the buffer was deleted (in order to
          * end up in the cache). Therefore its old aux-buffer range can be
          * removed from the aux-map.
          */
         if (cur->bufmgr->aux_map_ctx)
            intel_aux_map_unmap_range(cur->bufmgr->aux_map_ctx, cur->address,
                                      cur->size);
         cur->aux_map_address = 0;
      }

      /* If the cached BO isn't in the right memory zone, or the alignment
       * isn't sufficient, free the old memory and assign it a new address.
       */
      if (memzone != iris_memzone_for_address(cur->address) ||
          cur->address % alignment != 0) {
         if (!bufmgr->kmd_backend->gem_vm_unbind(cur)) {
            DBG("Unable to unbind vm of buf %u\n", cur->gem_handle);
            bo_free(cur);
            continue;
         }

         vma_free(bufmgr, cur->address, cur->size);
         cur->address = 0ull;
      }

      bo = cur;
      break;
   }

   if (!bo)
      return NULL;

   /* Zero the contents if necessary. If this fails, fall back to
    * allocating a fresh BO, which will always be zeroed by the kernel.
    */
   if ((flags & BO_ALLOC_ZEROED) && !zero_bo(bufmgr, flags, bo)) {
      bo_free(bo);
      return NULL;
   }

   return bo;
}
static int
i915_gem_set_domain(struct iris_bufmgr *bufmgr, uint32_t handle,
                    uint32_t read_domains, uint32_t write_domains)
{
   struct drm_i915_gem_set_domain sd = {
      .handle = handle,
      .read_domains = read_domains,
      .write_domain = write_domains,
   };
   return intel_ioctl(iris_bufmgr_get_fd(bufmgr),
                      DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
}
static struct iris_bo *
alloc_fresh_bo(struct iris_bufmgr *bufmgr, uint64_t bo_size, unsigned flags)
{
   struct iris_bo *bo = bo_calloc();
   if (!bo)
      return NULL;

   bo->real.heap = flags_to_heap(bufmgr, flags);

   const struct intel_memory_class_instance *regions[2];
   uint16_t num_regions = 0;

   if (bufmgr->vram.size > 0) {
      switch (bo->real.heap) {
      case IRIS_HEAP_DEVICE_LOCAL_PREFERRED:
         /* For vram allocations, still use system memory as a fallback. */
         regions[num_regions++] = bufmgr->vram.region;
         if (!(flags & BO_ALLOC_SCANOUT))
            regions[num_regions++] = bufmgr->sys.region;
         break;
      case IRIS_HEAP_DEVICE_LOCAL:
         regions[num_regions++] = bufmgr->vram.region;
         break;
      case IRIS_HEAP_SYSTEM_MEMORY:
         regions[num_regions++] = bufmgr->sys.region;
         break;
      default:
         unreachable("invalid heap for BO");
      }
   } else {
      regions[num_regions++] = bufmgr->sys.region;
   }

   bo->gem_handle = bufmgr->kmd_backend->gem_create(bufmgr, regions,
                                                    num_regions, bo_size,
                                                    bo->real.heap, flags);
   if (bo->gem_handle == 0) {
      free(bo);
      return NULL;
   }
   bo->bufmgr = bufmgr;
   bo->size = bo_size;
   bo->idle = true;

   if (bufmgr->vram.size == 0)
      /* Calling set_domain() will allocate pages for the BO outside of the
       * struct mutex lock in the kernel, which is more efficient than waiting
       * to create them during the first execbuf that uses the BO.
       */
      i915_gem_set_domain(bufmgr, bo->gem_handle, I915_GEM_DOMAIN_CPU, 0);

   return bo;
}
const char *
iris_heap_to_string[IRIS_HEAP_MAX] = {
   [IRIS_HEAP_SYSTEM_MEMORY] = "system",
   [IRIS_HEAP_DEVICE_LOCAL] = "local",
   [IRIS_HEAP_DEVICE_LOCAL_PREFERRED] = "local-preferred",
};
struct iris_bo *
iris_bo_alloc(struct iris_bufmgr *bufmgr,
              const char *name,
              uint64_t size,
              uint32_t alignment,
              enum iris_memory_zone memzone,
              unsigned flags)
{
   struct iris_bo *bo;
   unsigned int page_size = getpagesize();
   enum iris_heap heap = flags_to_heap(bufmgr, flags);
   bool local = heap != IRIS_HEAP_SYSTEM_MEMORY;
   struct bo_cache_bucket *bucket = bucket_for_size(bufmgr, size, heap, flags);

   if (memzone != IRIS_MEMZONE_OTHER || (flags & BO_ALLOC_COHERENT))
      flags |= BO_ALLOC_NO_SUBALLOC;

   bo = alloc_bo_from_slabs(bufmgr, name, size, alignment, flags);
   if (bo)
      return bo;

   /* Round the size up to the bucket size, or if we don't have caching
    * at this size, a multiple of the page size.
    */
   uint64_t bo_size =
      bucket ? bucket->size : MAX2(ALIGN(size, page_size), page_size);

   bool is_coherent = bufmgr->devinfo.has_llc ||
                      (bufmgr->vram.size > 0 && !local) ||
                      (flags & BO_ALLOC_COHERENT);
   bool is_scanout = (flags & BO_ALLOC_SCANOUT) != 0;

   enum iris_mmap_mode mmap_mode;
   if (!intel_vram_all_mappable(&bufmgr->devinfo) && heap == IRIS_HEAP_DEVICE_LOCAL)
      mmap_mode = IRIS_MMAP_NONE;
   else if (!local && is_coherent && !is_scanout)
      mmap_mode = IRIS_MMAP_WB;
   else
      mmap_mode = IRIS_MMAP_WC;

   simple_mtx_lock(&bufmgr->lock);

   /* Get a buffer out of the cache if available. First, we try to find
    * one with a matching memory zone so we can avoid reallocating VMA.
    */
   bo = alloc_bo_from_cache(bufmgr, bucket, alignment, memzone, mmap_mode,
                            flags, true);

   /* If that fails, we try for any cached BO, without matching memzone. */
   if (!bo)
      bo = alloc_bo_from_cache(bufmgr, bucket, alignment, memzone, mmap_mode,
                               flags, false);

   simple_mtx_unlock(&bufmgr->lock);

   if (!bo) {
      bo = alloc_fresh_bo(bufmgr, bo_size, flags);
      if (!bo)
         return NULL;
   }

   if (bo->address == 0ull) {
      simple_mtx_lock(&bufmgr->lock);
      bo->address = vma_alloc(bufmgr, memzone, bo->size, alignment);
      simple_mtx_unlock(&bufmgr->lock);

      if (bo->address == 0ull)
         goto err_free;

      if (!bufmgr->kmd_backend->gem_vm_bind(bo))
         goto err_vm_alloc;
   }

   bo->name = name;
   p_atomic_set(&bo->refcount, 1);
   bo->real.reusable = bucket && bufmgr->bo_reuse;
   bo->real.protected = flags & BO_ALLOC_PROTECTED;

   bo->real.kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;

   /* By default, capture all driver-internal buffers like shader kernels,
    * surface states, dynamic states, border colors, and so on.
    */
   if (memzone < IRIS_MEMZONE_OTHER || INTEL_DEBUG(DEBUG_CAPTURE_ALL))
      bo->real.kflags |= EXEC_OBJECT_CAPTURE;

   assert(bo->real.map == NULL || bo->real.mmap_mode == mmap_mode);
   bo->real.mmap_mode = mmap_mode;

   /* On integrated GPUs, enable snooping to ensure coherency if needed.
    * For discrete, we instead use SMEM and avoid WB maps for coherency.
    */
   if ((flags & BO_ALLOC_COHERENT) &&
       !bufmgr->devinfo.has_llc && bufmgr->devinfo.has_caching_uapi) {
      if (bufmgr->kmd_backend->bo_set_caching(bo, true) != 0)
         goto err_free;

      bo->real.reusable = false;
   }

   DBG("bo_create: buf %d (%s) (%s memzone) (%s) %llub\n", bo->gem_handle,
       bo->name, memzone_name(memzone), iris_heap_to_string[bo->real.heap],
       (unsigned long long) size);

   return bo;

err_vm_alloc:
   simple_mtx_lock(&bufmgr->lock);
   vma_free(bufmgr, bo->address, bo->size);
   simple_mtx_unlock(&bufmgr->lock);
err_free:
   simple_mtx_lock(&bufmgr->lock);
   bo_free(bo);
   simple_mtx_unlock(&bufmgr->lock);

   return NULL;
}
static int
iris_bo_close(int fd, uint32_t gem_handle)
{
   struct drm_gem_close close = {
      .handle = gem_handle,
   };
   return intel_ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close);
}

int
iris_bufmgr_bo_close(struct iris_bufmgr *bufmgr, uint32_t gem_handle)
{
   return iris_bo_close(bufmgr->fd, gem_handle);
}
struct iris_bo *
iris_bo_create_userptr(struct iris_bufmgr *bufmgr, const char *name,
                       void *ptr, size_t size,
                       enum iris_memory_zone memzone)
{
   struct iris_bo *bo = bo_calloc();
   if (!bo)
      return NULL;

   struct drm_i915_gem_userptr arg = {
      .user_ptr = (uintptr_t)ptr,
      .user_size = size,
      .flags = bufmgr->devinfo.has_userptr_probe ? I915_USERPTR_PROBE : 0,
   };
   if (intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
      goto err_free;
   bo->gem_handle = arg.handle;

   if (!bufmgr->devinfo.has_userptr_probe) {
      /* Check the buffer for validity before we try and use it in a batch */
      if (i915_gem_set_domain(bufmgr, bo->gem_handle, I915_GEM_DOMAIN_CPU, 0))
         goto err_close;
   }

   bo->name = name;
   bo->size = size;
   bo->real.map = ptr;

   bo->bufmgr = bufmgr;
   bo->real.kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;

   if (INTEL_DEBUG(DEBUG_CAPTURE_ALL))
      bo->real.kflags |= EXEC_OBJECT_CAPTURE;

   simple_mtx_lock(&bufmgr->lock);
   bo->address = vma_alloc(bufmgr, memzone, size, 1);
   simple_mtx_unlock(&bufmgr->lock);

   if (bo->address == 0ull)
      goto err_close;

   if (!bufmgr->kmd_backend->gem_vm_bind(bo))
      goto err_vma_free;

   p_atomic_set(&bo->refcount, 1);
   bo->real.userptr = true;
   bo->idle = true;
   bo->real.mmap_mode = IRIS_MMAP_WB;

   return bo;

err_vma_free:
   simple_mtx_lock(&bufmgr->lock);
   vma_free(bufmgr, bo->address, bo->size);
   simple_mtx_unlock(&bufmgr->lock);
err_close:
   iris_bufmgr_bo_close(bufmgr, bo->gem_handle);
err_free:
   free(bo);
   return NULL;
}
/**
 * Returns an iris_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
struct iris_bo *
iris_bo_gem_create_from_name(struct iris_bufmgr *bufmgr,
                             const char *name, unsigned int handle)
{
   struct iris_bo *bo;

   /* At the moment most applications only have a few named BOs.
    * For instance, in a DRI client only the render buffers passed
    * between X and the client are named. And since X returns the
    * alternating names for the front/back buffer a linear search
    * provides a sufficiently fast match.
    */
   simple_mtx_lock(&bufmgr->lock);
   bo = find_and_ref_external_bo(bufmgr->name_table, handle);
   if (bo)
      goto out;

   struct drm_gem_open open_arg = { .name = handle };
   int ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
   if (ret != 0) {
      DBG("Couldn't reference %s handle 0x%08x: %s\n",
          name, handle, strerror(errno));
      bo = NULL;
      goto out;
   }
   /* Now see if someone has used a prime handle to get this
    * object from the kernel before by looking through the list
    * again for a matching gem_handle
    */
   bo = find_and_ref_external_bo(bufmgr->handle_table, open_arg.handle);
   if (bo)
      goto out;

   bo = bo_calloc();
   if (!bo) {
      iris_bufmgr_bo_close(bufmgr, open_arg.handle);
      goto out;
   }

   p_atomic_set(&bo->refcount, 1);

   bo->size = open_arg.size;
   bo->bufmgr = bufmgr;
   bo->gem_handle = open_arg.handle;
   bo->name = name;
   bo->real.global_name = handle;
   bo->real.reusable = false;
   bo->real.imported = true;
   bo->real.mmap_mode = IRIS_MMAP_NONE;
   bo->real.kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
   if (INTEL_DEBUG(DEBUG_CAPTURE_ALL))
      bo->real.kflags |= EXEC_OBJECT_CAPTURE;
   bo->address = vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 1);
   if (bo->address == 0ull)
      goto err_free;

   if (!bufmgr->kmd_backend->gem_vm_bind(bo))
      goto err_vm_alloc;

   _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
   _mesa_hash_table_insert(bufmgr->name_table, &bo->real.global_name, bo);

   DBG("bo_create_from_handle: %d (%s)\n", handle, bo->name);

out:
   simple_mtx_unlock(&bufmgr->lock);
   return bo;

err_vm_alloc:
   vma_free(bufmgr, bo->address, bo->size);
err_free:
   bo_free(bo);
   simple_mtx_unlock(&bufmgr->lock);
   return NULL;
}
static void
bo_close(struct iris_bo *bo)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   simple_mtx_assert_locked(&bufmgr->lock);
   assert(iris_bo_is_real(bo));

   if (iris_bo_is_external(bo)) {
      struct hash_entry *entry;

      if (bo->real.global_name) {
         entry = _mesa_hash_table_search(bufmgr->name_table,
                                         &bo->real.global_name);
         _mesa_hash_table_remove(bufmgr->name_table, entry);
      }

      entry = _mesa_hash_table_search(bufmgr->handle_table, &bo->gem_handle);
      _mesa_hash_table_remove(bufmgr->handle_table, entry);

      list_for_each_entry_safe(struct bo_export, export, &bo->real.exports, link) {
         iris_bo_close(export->drm_fd, export->gem_handle);

         list_del(&export->link);
         free(export);
      }
   }

   assert(list_is_empty(&bo->real.exports));

   /* Unbind and return the VMA for reuse */
   if (bufmgr->kmd_backend->gem_vm_unbind(bo))
      vma_free(bo->bufmgr, bo->address, bo->size);
   else
      DBG("Unable to unbind vm of buf %u\n", bo->gem_handle);

   /* Close this object */
   if (iris_bufmgr_bo_close(bufmgr, bo->gem_handle) != 0) {
      DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
          bo->gem_handle, bo->name, strerror(errno));
   }

   if (bo->aux_map_address && bo->bufmgr->aux_map_ctx) {
      intel_aux_map_unmap_range(bo->bufmgr->aux_map_ctx, bo->address,
                                bo->size);
   }

   for (int d = 0; d < bo->deps_size; d++) {
      for (int b = 0; b < IRIS_BATCH_COUNT; b++) {
         iris_syncobj_reference(bufmgr, &bo->deps[d].write_syncobjs[b], NULL);
         iris_syncobj_reference(bufmgr, &bo->deps[d].read_syncobjs[b], NULL);
      }
   }
   free(bo->deps);

   free(bo);
}
static void
bo_free(struct iris_bo *bo)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   simple_mtx_assert_locked(&bufmgr->lock);
   assert(iris_bo_is_real(bo));

   if (!bo->real.userptr && bo->real.map)
      bo_unmap(bo);

   if (bo->idle || !iris_bo_busy(bo)) {
      bo_close(bo);
   } else {
      /* Defer closing the GEM BO and returning the VMA for reuse until the
       * BO is idle. Just move it to the dead list for now.
       */
      list_addtail(&bo->head, &bufmgr->zombie_list);
   }
}
/** Frees all cached buffers significantly older than @time. */
static void
cleanup_bo_cache(struct iris_bufmgr *bufmgr, time_t time)
{
   int i;

   simple_mtx_assert_locked(&bufmgr->lock);

   if (bufmgr->time == time)
      return;

   for (i = 0; i < bufmgr->num_buckets; i++) {
      struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];

      list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
         if (time - bo->real.free_time <= 1)
            break;

         list_del(&bo->head);
         bo_free(bo);
      }
   }

   for (i = 0; i < bufmgr->num_local_buckets; i++) {
      struct bo_cache_bucket *bucket = &bufmgr->local_cache_bucket[i];

      list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
         if (time - bo->real.free_time <= 1)
            break;

         list_del(&bo->head);
         bo_free(bo);
      }
   }

   for (i = 0; i < bufmgr->num_local_preferred_buckets; i++) {
      struct bo_cache_bucket *bucket = &bufmgr->local_preferred_cache_bucket[i];

      list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
         if (time - bo->real.free_time <= 1)
            break;

         list_del(&bo->head);
         bo_free(bo);
      }
   }

   list_for_each_entry_safe(struct iris_bo, bo, &bufmgr->zombie_list, head) {
      /* Stop once we reach a busy BO - all others past this point were
       * freed more recently so are likely also busy.
       */
      if (!bo->idle && iris_bo_busy(bo))
         break;

      list_del(&bo->head);
      bo_close(bo);
   }

   bufmgr->time = time;
}
static void
bo_unreference_final(struct iris_bo *bo, time_t time)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;
   struct bo_cache_bucket *bucket;

   DBG("bo_unreference final: %d (%s)\n", bo->gem_handle, bo->name);

   assert(iris_bo_is_real(bo));

   bucket = NULL;
   if (bo->real.reusable)
      bucket = bucket_for_size(bufmgr, bo->size, bo->real.heap, 0);
   /* Put the buffer into our internal cache for reuse if we can. */
   if (bucket && iris_bo_madvise(bo, IRIS_MADVICE_DONT_NEED)) {
      bo->real.free_time = time;
      bo->name = NULL;

      list_addtail(&bo->head, &bucket->head);
   } else {
      bo_free(bo);
   }
}
void
iris_bo_unreference(struct iris_bo *bo)
{
   if (bo == NULL)
      return;

   assert(p_atomic_read(&bo->refcount) > 0);

   if (atomic_add_unless(&bo->refcount, -1, 1)) {
      struct iris_bufmgr *bufmgr = bo->bufmgr;
      struct timespec time;

      clock_gettime(CLOCK_MONOTONIC, &time);

      if (bo->gem_handle == 0) {
         pb_slab_free(get_slabs(bufmgr, bo->size), &bo->slab.entry);
      } else {
         simple_mtx_lock(&bufmgr->lock);

         if (p_atomic_dec_zero(&bo->refcount)) {
            bo_unreference_final(bo, time.tv_sec);
            cleanup_bo_cache(bufmgr, time.tv_sec);
         }

         simple_mtx_unlock(&bufmgr->lock);
      }
   }
}
static void
bo_wait_with_stall_warning(struct util_debug_callback *dbg,
                           struct iris_bo *bo,
                           const char *action)
{
   bool busy = dbg && !bo->idle;
   double elapsed = unlikely(busy) ? -get_time() : 0.0;

   iris_bo_wait_rendering(bo);

   if (unlikely(busy)) {
      elapsed += get_time();
      if (elapsed > 1e-5) /* 0.01ms */ {
         perf_debug(dbg, "%s a busy \"%s\" BO stalled and took %.03f ms.\n",
                    action, bo->name, elapsed * 1000);
      }
   }
}
static void
print_flags(unsigned flags)
{
   if (flags & MAP_READ)
      DBG("READ ");
   if (flags & MAP_WRITE)
      DBG("WRITE ");
   if (flags & MAP_ASYNC)
      DBG("ASYNC ");
   if (flags & MAP_PERSISTENT)
      DBG("PERSISTENT ");
   if (flags & MAP_COHERENT)
      DBG("COHERENT ");
   if (flags & MAP_RAW)
      DBG("RAW ");
   DBG("\n");
}
void *
iris_bo_map(struct util_debug_callback *dbg,
            struct iris_bo *bo, unsigned flags)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;
   void *map = NULL;

   if (bo->gem_handle == 0) {
      struct iris_bo *real = iris_get_backing_bo(bo);
      uint64_t offset = bo->address - real->address;
      map = iris_bo_map(dbg, real, flags | MAP_ASYNC) + offset;
   } else {
      assert(bo->real.mmap_mode != IRIS_MMAP_NONE);
      if (bo->real.mmap_mode == IRIS_MMAP_NONE)
         return NULL;

      if (!bo->real.map) {
         DBG("iris_bo_map: %d (%s)\n", bo->gem_handle, bo->name);
         map = bufmgr->kmd_backend->gem_mmap(bufmgr, bo);
         if (!map)
            return NULL;

         VG_DEFINED(map, bo->size);

         if (p_atomic_cmpxchg(&bo->real.map, NULL, map)) {
            VG_NOACCESS(map, bo->size);
            os_munmap(map, bo->size);
         }
      }
      assert(bo->real.map);
      map = bo->real.map;

      DBG("iris_bo_map: %d (%s) -> %p\n",
          bo->gem_handle, bo->name, bo->real.map);
      print_flags(flags);

      if (!(flags & MAP_ASYNC)) {
         bo_wait_with_stall_warning(dbg, bo, "memory mapping");
      }
   }

   return map;
}
/**
 * Waits on a BO for the given amount of time.
 *
 * @bo: buffer object to wait for
 * @timeout_ns: amount of time to wait in nanoseconds.
 *   If value is less than 0, an infinite wait will occur.
 *
 * Returns 0 if the wait was successful, i.e. the last batch referencing the
 * object has completed within the allotted time. Otherwise some negative return
 * value describes the error. Of particular interest is -ETIME when the wait has
 * failed to yield the desired result.
 *
 * Similar to iris_bo_wait_rendering except a timeout parameter allows
 * the operation to give up after a certain amount of time. Another subtle
 * difference is the internal locking semantics are different (this variant does
 * not hold the lock for the duration of the wait). This makes the wait subject
 * to a larger userspace race window.
 *
 * The implementation shall wait until the object is no longer actively
 * referenced within a batch buffer at the time of the call. The wait will
 * not guarantee that the buffer is re-issued via another thread, or a flinked
 * handle. Userspace must make sure this race does not occur if such precision
 * is important.
 *
 * Note that some kernels have broken the infinite wait for negative values
 * promise; upgrade to latest stable kernels if this is the case.
 */
int
iris_bo_wait(struct iris_bo *bo, int64_t timeout_ns)
{
   int ret;

   switch (iris_bufmgr_get_device_info(bo->bufmgr)->kmd_type) {
   case INTEL_KMD_TYPE_I915:
      if (iris_bo_is_external(bo))
         ret = iris_i915_bo_wait_gem(bo, timeout_ns);
      else
         ret = iris_bo_wait_syncobj(bo, timeout_ns);
      break;
   case INTEL_KMD_TYPE_XE:
      ret = iris_bo_wait_syncobj(bo, timeout_ns);
      break;
   default:
      unreachable("missing");
      ret = -1;
   }

   bo->idle = ret == 0;

   return ret;
}
/** Waits for all GPU rendering with the object to have completed. */
void
iris_bo_wait_rendering(struct iris_bo *bo)
{
   /* We require a kernel recent enough for WAIT_IOCTL support.
    * See intel_init_bufmgr()
    */
   iris_bo_wait(bo, -1);
}
static void
iris_bufmgr_destroy_global_vm(struct iris_bufmgr *bufmgr)
{
   switch (bufmgr->devinfo.kmd_type) {
   case INTEL_KMD_TYPE_I915:
      /* Nothing to do in i915 */
      break;
   case INTEL_KMD_TYPE_XE:
      iris_xe_destroy_global_vm(bufmgr);
      break;
   default:
      unreachable("missing");
   }
}
static void
iris_bufmgr_destroy(struct iris_bufmgr *bufmgr)
{
   iris_destroy_border_color_pool(&bufmgr->border_color_pool);

   /* Free aux-map buffers */
   intel_aux_map_finish(bufmgr->aux_map_ctx);

   /* bufmgr will no longer try to free VMA entries in the aux-map */
   bufmgr->aux_map_ctx = NULL;

   for (int i = 0; i < NUM_SLAB_ALLOCATORS; i++) {
      if (bufmgr->bo_slabs[i].groups)
         pb_slabs_deinit(&bufmgr->bo_slabs[i]);
   }

   simple_mtx_lock(&bufmgr->lock);
   /* Free any cached buffer objects we were going to reuse */
   for (int i = 0; i < bufmgr->num_buckets; i++) {
      struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];

      list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
         list_del(&bo->head);

         bo_free(bo);
      }
   }

   for (int i = 0; i < bufmgr->num_local_buckets; i++) {
      struct bo_cache_bucket *bucket = &bufmgr->local_cache_bucket[i];

      list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
         list_del(&bo->head);

         bo_free(bo);
      }
   }

   for (int i = 0; i < bufmgr->num_local_preferred_buckets; i++) {
      struct bo_cache_bucket *bucket = &bufmgr->local_preferred_cache_bucket[i];

      list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
         list_del(&bo->head);

         bo_free(bo);
      }
   }

   /* Close any buffer objects on the dead list. */
   list_for_each_entry_safe(struct iris_bo, bo, &bufmgr->zombie_list, head) {
      list_del(&bo->head);
      bo_close(bo);
   }

   _mesa_hash_table_destroy(bufmgr->name_table, NULL);
   _mesa_hash_table_destroy(bufmgr->handle_table, NULL);

   for (int z = 0; z < IRIS_MEMZONE_COUNT; z++)
      util_vma_heap_finish(&bufmgr->vma_allocator[z]);

   iris_bufmgr_destroy_global_vm(bufmgr);

   close(bufmgr->fd);

   simple_mtx_unlock(&bufmgr->lock);

   simple_mtx_destroy(&bufmgr->lock);
   simple_mtx_destroy(&bufmgr->bo_deps_lock);

   free(bufmgr);
}
int
iris_gem_get_tiling(struct iris_bo *bo, uint32_t *tiling)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   if (!bufmgr->devinfo.has_tiling_uapi) {
      *tiling = I915_TILING_NONE;
      return 0;
   }

   struct drm_i915_gem_get_tiling ti = { .handle = bo->gem_handle };
   int ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &ti);

   if (ret) {
      DBG("gem_get_tiling failed for BO %u: %s\n",
          bo->gem_handle, strerror(errno));
   }

   *tiling = ti.tiling_mode;

   return ret;
}
int
iris_gem_set_tiling(struct iris_bo *bo, const struct isl_surf *surf)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;
   uint32_t tiling_mode = isl_tiling_to_i915_tiling(surf->tiling);
   int ret;

   /* If we can't do map_gtt, the set/get_tiling API isn't useful. And it's
    * actually not supported by the kernel in those cases.
    */
   if (!bufmgr->devinfo.has_tiling_uapi)
      return 0;

   /* GEM_SET_TILING is slightly broken and overwrites the input on the
    * error path, so we have to open code intel_ioctl().
    */
   do {
      struct drm_i915_gem_set_tiling set_tiling = {
         .handle = bo->gem_handle,
         .tiling_mode = tiling_mode,
         .stride = surf->row_pitch_B,
      };

      ret = ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   if (ret) {
      DBG("gem_set_tiling failed for BO %u: %s\n",
          bo->gem_handle, strerror(errno));
   }

   return ret;
}
struct iris_bo *
iris_bo_import_dmabuf(struct iris_bufmgr *bufmgr, int prime_fd)
{
   uint32_t handle;
   struct iris_bo *bo;

   simple_mtx_lock(&bufmgr->lock);
   int ret = drmPrimeFDToHandle(bufmgr->fd, prime_fd, &handle);
   if (ret) {
      DBG("import_dmabuf: failed to obtain handle from fd: %s\n",
          strerror(errno));
      simple_mtx_unlock(&bufmgr->lock);
      return NULL;
   }

   /*
    * See if the kernel has already returned this buffer to us. Just as
    * for named buffers, we must not create two BOs pointing at the same
    * kernel object.
    */
   bo = find_and_ref_external_bo(bufmgr->handle_table, handle);
   if (bo)
      goto out;

   bo = bo_calloc();
   if (!bo)
      goto out;

   p_atomic_set(&bo->refcount, 1);

   /* Determine size of bo. The fd-to-handle ioctl really should
    * return the size, but it doesn't. If we have kernel 3.12 or
    * later, we can lseek on the prime fd to get the size. Older
    * kernels will just fail, in which case we fall back to the
    * provided (estimated or guessed) size. */
   ret = lseek(prime_fd, 0, SEEK_END);
   if (ret != -1)
      bo->size = ret;

   bo->bufmgr = bufmgr;
   bo->name = "prime";
   bo->real.reusable = false;
   bo->real.imported = true;
   bo->real.mmap_mode = IRIS_MMAP_NONE;
   bo->real.kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
   if (INTEL_DEBUG(DEBUG_CAPTURE_ALL))
      bo->real.kflags |= EXEC_OBJECT_CAPTURE;
   bo->gem_handle = handle;

   /* From the Bspec, Memory Compression - Gfx12:
    *
    *    The base address for the surface has to be 64K page aligned and the
    *    surface is expected to be padded in the virtual domain to be 4 4K
    *    pages.
    *
    * The dmabuf may contain a compressed surface. Align the BO to 64KB just
    * in case. We always align to 64KB even on platforms where we don't need
    * to, because it's a fairly reasonable thing to do anyway.
    */
   bo->address = vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 64 * 1024);
   if (bo->address == 0ull)
      goto err_free;

   if (!bufmgr->kmd_backend->gem_vm_bind(bo))
      goto err_vm_alloc;

   _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);

out:
   simple_mtx_unlock(&bufmgr->lock);
   return bo;

err_vm_alloc:
   vma_free(bufmgr, bo->address, bo->size);
err_free:
   bo_free(bo);
   simple_mtx_unlock(&bufmgr->lock);
   return NULL;
}
static void
iris_bo_mark_exported_locked(struct iris_bo *bo)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   /* We cannot export suballocated BOs. */
   assert(iris_bo_is_real(bo));
   simple_mtx_assert_locked(&bufmgr->lock);

   if (!iris_bo_is_external(bo))
      _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);

   if (!bo->real.exported) {
      /* If a BO is going to be used externally, it could be sent to the
       * display HW. So make sure our CPU mappings don't assume cache
       * coherency since display is outside that cache.
       */
      bo->real.exported = true;
      bo->real.reusable = false;
   }
}
void
iris_bo_mark_exported(struct iris_bo *bo)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   /* We cannot export suballocated BOs. */
   assert(iris_bo_is_real(bo));

   if (bo->real.exported) {
      assert(!bo->real.reusable);
      return;
   }

   simple_mtx_lock(&bufmgr->lock);
   iris_bo_mark_exported_locked(bo);
   simple_mtx_unlock(&bufmgr->lock);
}
int
iris_bo_export_dmabuf(struct iris_bo *bo, int *prime_fd)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   /* We cannot export suballocated BOs. */
   assert(iris_bo_is_real(bo));

   if (drmPrimeHandleToFD(bufmgr->fd, bo->gem_handle,
                          DRM_CLOEXEC | DRM_RDWR, prime_fd) != 0)
      return -errno;

   iris_bo_mark_exported(bo);

   return 0;
}
uint32_t
iris_bo_export_gem_handle(struct iris_bo *bo)
{
   /* We cannot export suballocated BOs. */
   assert(iris_bo_is_real(bo));

   iris_bo_mark_exported(bo);

   return bo->gem_handle;
}
int
iris_bo_flink(struct iris_bo *bo, uint32_t *name)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   /* We cannot export suballocated BOs. */
   assert(iris_bo_is_real(bo));

   if (!bo->real.global_name) {
      struct drm_gem_flink flink = { .handle = bo->gem_handle };

      if (intel_ioctl(bufmgr->fd, DRM_IOCTL_GEM_FLINK, &flink))
         return -errno;

      simple_mtx_lock(&bufmgr->lock);
      if (!bo->real.global_name) {
         iris_bo_mark_exported_locked(bo);
         bo->real.global_name = flink.name;
         _mesa_hash_table_insert(bufmgr->name_table, &bo->real.global_name, bo);
      }
      simple_mtx_unlock(&bufmgr->lock);
   }

   *name = bo->real.global_name;
   return 0;
}
int
iris_bo_export_gem_handle_for_device(struct iris_bo *bo, int drm_fd,
                                     uint32_t *out_handle)
{
   /* We cannot export suballocated BOs. */
   assert(iris_bo_is_real(bo));

   /* Only add the new GEM handle to the list of exports if it belongs to a
    * different GEM device. Otherwise we might close the same buffer multiple
    * times.
    */
   struct iris_bufmgr *bufmgr = bo->bufmgr;
   int ret = os_same_file_description(drm_fd, bufmgr->fd);
   WARN_ONCE(ret < 0,
             "Kernel has no file descriptor comparison support: %s\n",
             strerror(errno));
   if (ret == 0) {
      *out_handle = iris_bo_export_gem_handle(bo);
      return 0;
   }

   struct bo_export *export = calloc(1, sizeof(*export));
   if (!export)
      return -ENOMEM;

   export->drm_fd = drm_fd;

   int dmabuf_fd = -1;
   int err = iris_bo_export_dmabuf(bo, &dmabuf_fd);
   if (err) {
      free(export);
      return err;
   }

   simple_mtx_lock(&bufmgr->lock);
   err = drmPrimeFDToHandle(drm_fd, dmabuf_fd, &export->gem_handle);
   close(dmabuf_fd);
   if (err) {
      simple_mtx_unlock(&bufmgr->lock);
      free(export);
      return err;
   }

   bool found = false;
   list_for_each_entry(struct bo_export, iter, &bo->real.exports, link) {
      if (iter->drm_fd != drm_fd)
         continue;
      /* Here we assume that for a given DRM fd, we'll always get back the
       * same GEM handle for a given buffer.
       */
      assert(iter->gem_handle == export->gem_handle);
      free(export);
      export = iter;
      found = true;
      break;
   }

   if (!found)
      list_addtail(&export->link, &bo->real.exports);

   simple_mtx_unlock(&bufmgr->lock);

   *out_handle = export->gem_handle;

   return 0;
}
static void
add_bucket(struct iris_bufmgr *bufmgr, int size, enum iris_heap heap)
{
   int *num_buckets;
   struct bo_cache_bucket *buckets;
   bucket_info_for_heap(bufmgr, heap, &buckets, &num_buckets);

   unsigned int i = (*num_buckets)++;

   list_inithead(&buckets[i].head);
   buckets[i].size = size;

   assert(bucket_for_size(bufmgr, size, heap, 0) == &buckets[i]);
   assert(bucket_for_size(bufmgr, size - 2048, heap, 0) == &buckets[i]);
   assert(bucket_for_size(bufmgr, size + 1, heap, 0) != &buckets[i]);
}
static void
init_cache_buckets(struct iris_bufmgr *bufmgr, enum iris_heap heap)
{
   uint64_t size, cache_max_size = 64 * 1024 * 1024;

   /* OK, so power of two buckets was too wasteful of memory.
    * Give 3 other sizes between each power of two, to hopefully
    * cover things accurately enough. (The alternative is
    * probably to just go for exact matching of sizes, and assume
    * that for things like composited window resize the tiled
    * width/height alignment and rounding of sizes to pages will
    * get us useful cache hit rates anyway)
    */
   add_bucket(bufmgr, PAGE_SIZE, heap);
   add_bucket(bufmgr, PAGE_SIZE * 2, heap);
   add_bucket(bufmgr, PAGE_SIZE * 3, heap);

   /* Initialize the linked lists for BO reuse cache. */
   for (size = 4 * PAGE_SIZE; size <= cache_max_size; size *= 2) {
      add_bucket(bufmgr, size, heap);

      add_bucket(bufmgr, size + size * 1 / 4, heap);
      add_bucket(bufmgr, size + size * 2 / 4, heap);
      add_bucket(bufmgr, size + size * 3 / 4, heap);
   }
}
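/* With 4 KiB pages the resulting bucket ladder is 4, 8, and 12 KiB, then
 * each power of two from 16 KiB upward followed by three evenly spaced
 * sizes before the next power of two: 16, 20, 24, 28, 32, 40, 48, 56,
 * 64 KiB, and so on. This is exactly the row/column layout that
 * bucket_for_size() indexes in O(1).
 */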
static struct intel_buffer *
intel_aux_map_buffer_alloc(void *driver_ctx, uint32_t size)
{
   struct intel_buffer *buf = malloc(sizeof(struct intel_buffer));
   if (!buf)
      return NULL;

   struct iris_bufmgr *bufmgr = (struct iris_bufmgr *)driver_ctx;

   unsigned int page_size = getpagesize();
   size = MAX2(ALIGN(size, page_size), page_size);

   struct iris_bo *bo = alloc_fresh_bo(bufmgr, size, 0);
   if (!bo) {
      free(buf);
      return NULL;
   }

   simple_mtx_lock(&bufmgr->lock);

   bo->address = vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 64 * 1024);
   if (bo->address == 0ull)
      goto err_free;

   if (!bufmgr->kmd_backend->gem_vm_bind(bo))
      goto err_vm_alloc;

   simple_mtx_unlock(&bufmgr->lock);

   bo->name = "aux-map";
   p_atomic_set(&bo->refcount, 1);
   bo->real.kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED |
                     EXEC_OBJECT_CAPTURE;
   bo->real.mmap_mode =
      bo->real.heap != IRIS_HEAP_SYSTEM_MEMORY ? IRIS_MMAP_WC : IRIS_MMAP_WB;

   buf->driver_bo = bo;
   buf->gpu = bo->address;
   buf->gpu_end = buf->gpu + bo->size;
   buf->map = iris_bo_map(NULL, bo, MAP_WRITE | MAP_RAW);
   return buf;

err_vm_alloc:
   vma_free(bufmgr, bo->address, bo->size);
err_free:
   free(buf);
   bo_free(bo);
   simple_mtx_unlock(&bufmgr->lock);
   return NULL;
}
static void
intel_aux_map_buffer_free(void *driver_ctx, struct intel_buffer *buffer)
{
   iris_bo_unreference((struct iris_bo*)buffer->driver_bo);
   free(buffer);
}

static struct intel_mapped_pinned_buffer_alloc aux_map_allocator = {
   .alloc = intel_aux_map_buffer_alloc,
   .free = intel_aux_map_buffer_free,
};
static void
iris_bufmgr_get_meminfo(struct iris_bufmgr *bufmgr,
                        struct intel_device_info *devinfo)
{
   bufmgr->sys.region = &devinfo->mem.sram.mem;
   bufmgr->sys.size = devinfo->mem.sram.mappable.size;

   bufmgr->vram.region = &devinfo->mem.vram.mem;
   bufmgr->vram.size = devinfo->mem.vram.mappable.size;
}
static bool
iris_bufmgr_init_global_vm(struct iris_bufmgr *bufmgr)
{
   switch (bufmgr->devinfo.kmd_type) {
   case INTEL_KMD_TYPE_I915:
      bufmgr->use_global_vm = iris_i915_init_global_vm(bufmgr, &bufmgr->global_vm_id);
      /* i915 doesn't require a VM, so return true even if use_global_vm is false */
      return true;
   case INTEL_KMD_TYPE_XE:
      bufmgr->use_global_vm = iris_xe_init_global_vm(bufmgr, &bufmgr->global_vm_id);
      /* Xe requires a VM */
      return bufmgr->use_global_vm;
   default:
      unreachable("missing");
      return false;
   }
}
/**
 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
 * and manage buffer objects.
 *
 * \param fd File descriptor of the opened DRM device.
 */
static struct iris_bufmgr *
iris_bufmgr_create(struct intel_device_info *devinfo, int fd, bool bo_reuse)
{
   if (devinfo->gtt_size <= IRIS_MEMZONE_OTHER_START)
      return NULL;

   struct iris_bufmgr *bufmgr = calloc(1, sizeof(*bufmgr));
   if (bufmgr == NULL)
      return NULL;

   /* Handles to buffer objects belong to the device fd and are not
    * reference counted by the kernel. If the same fd is used by
    * multiple parties (threads sharing the same screen bufmgr, or
    * even worse the same device fd passed to multiple libraries)
    * ownership of those handles is shared by those independent parties.
    *
    * Don't do this! Ensure that each library/bufmgr has its own device
    * fd so that its namespace does not clash with another.
    */
   bufmgr->fd = os_dupfd_cloexec(fd);
   if (bufmgr->fd == -1)
      goto error_dup;

   p_atomic_set(&bufmgr->refcount, 1);

   simple_mtx_init(&bufmgr->lock, mtx_plain);
   simple_mtx_init(&bufmgr->bo_deps_lock, mtx_plain);

   list_inithead(&bufmgr->zombie_list);

   bufmgr->devinfo = *devinfo;
   devinfo = &bufmgr->devinfo;
   bufmgr->bo_reuse = bo_reuse;
   iris_bufmgr_get_meminfo(bufmgr, devinfo);
   bufmgr->kmd_backend = iris_kmd_backend_get(devinfo->kmd_type);

   struct intel_query_engine_info *engine_info;
   engine_info = intel_engine_get_info(bufmgr->fd, bufmgr->devinfo.kmd_type);
   if (!engine_info)
      goto error_engine_info;
   bufmgr->devinfo.has_compute_engine = intel_engines_count(engine_info,
                                                            INTEL_ENGINE_CLASS_COMPUTE);
   free(engine_info);

   if (!iris_bufmgr_init_global_vm(bufmgr))
      goto error_init_vm;

   STATIC_ASSERT(IRIS_MEMZONE_SHADER_START == 0ull);
   const uint64_t _4GB = 1ull << 32;
   const uint64_t _2GB = 1ull << 31;

   /* The STATE_BASE_ADDRESS size field can only hold 1 page shy of 4GB */
   const uint64_t _4GB_minus_1 = _4GB - PAGE_SIZE;

   util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_SHADER],
                      PAGE_SIZE, _4GB_minus_1 - PAGE_SIZE);
   util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_BINDER],
                      IRIS_MEMZONE_BINDER_START + IRIS_SCRATCH_ZONE_SIZE,
                      IRIS_BINDER_ZONE_SIZE - IRIS_SCRATCH_ZONE_SIZE);
   util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_SCRATCH],
                      IRIS_MEMZONE_SCRATCH_START, IRIS_SCRATCH_ZONE_SIZE);
   util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_SURFACE],
                      IRIS_MEMZONE_SURFACE_START, _4GB_minus_1 -
                      IRIS_BINDER_ZONE_SIZE - IRIS_SCRATCH_ZONE_SIZE);

   /* Wa_2209859288: the Tigerlake PRM's workarounds volume says:
    *
    *    "PSDunit is dropping MSB of the blend state pointer from SD FIFO"
    *    "Limit the Blend State Pointer to < 2G"
    *
    * We restrict the dynamic state pool to 2GB so that we don't ever get a
    * BLEND_STATE pointer with the MSB set. We aren't likely to need the
    * full 4GB for dynamic state anyway.
    */
   const uint64_t dynamic_pool_size =
      (devinfo->ver >= 12 ? _2GB : _4GB_minus_1) - IRIS_BORDER_COLOR_POOL_SIZE;
   util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_DYNAMIC],
                      IRIS_MEMZONE_DYNAMIC_START + IRIS_BORDER_COLOR_POOL_SIZE,
                      dynamic_pool_size);

   /* Leave the last 4GB out of the high vma range, so that no state
    * base address + size can overflow 48 bits.
    */
   util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_OTHER],
                      IRIS_MEMZONE_OTHER_START,
                      (devinfo->gtt_size - _4GB) - IRIS_MEMZONE_OTHER_START);
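
   /* The overall layout, low to high (exact boundaries are set by the
    * IRIS_MEMZONE_*_START definitions in iris_bufmgr.h): shader, then the
    * combined scratch/binder zone, surface, dynamic (with the border color
    * pool pinned at its base), and finally "other" spanning the rest of the
    * GTT minus the trailing 4GB guard reserved above.
    */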
   init_cache_buckets(bufmgr, IRIS_HEAP_SYSTEM_MEMORY);
   init_cache_buckets(bufmgr, IRIS_HEAP_DEVICE_LOCAL);
   init_cache_buckets(bufmgr, IRIS_HEAP_DEVICE_LOCAL_PREFERRED);

   unsigned min_slab_order = 8;  /* 256 bytes */
   unsigned max_slab_order = 20; /* 1 MB (slab size = 2 MB) */
   unsigned num_slab_orders_per_allocator =
      (max_slab_order - min_slab_order) / NUM_SLAB_ALLOCATORS;

   /* Divide the size order range among slab managers. */
   for (unsigned i = 0; i < NUM_SLAB_ALLOCATORS; i++) {
      unsigned min_order = min_slab_order;
      unsigned max_order =
         MIN2(min_order + num_slab_orders_per_allocator, max_slab_order);

      if (!pb_slabs_init(&bufmgr->bo_slabs[i], min_order, max_order,
                         IRIS_HEAP_MAX, true, bufmgr,
                         iris_can_reclaim_slab,
                         iris_slab_alloc,
                         (void *) iris_slab_free)) {
         goto error_slabs_init;
      }

      min_slab_order = max_order + 1;
   }
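
   /* With min_slab_order = 8 and max_slab_order = 20 this splits the range
    * into three allocators covering entry orders 8-12 (256 B to 4 KiB),
    * 13-17 (8 KiB to 128 KiB), and 18-20 (256 KiB to 1 MiB).
    */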
   bufmgr->name_table =
      _mesa_hash_table_create(NULL, _mesa_hash_uint, _mesa_key_uint_equal);
   bufmgr->handle_table =
      _mesa_hash_table_create(NULL, _mesa_hash_uint, _mesa_key_uint_equal);

   if (devinfo->has_aux_map) {
      bufmgr->aux_map_ctx = intel_aux_map_init(bufmgr, &aux_map_allocator,
                                               devinfo);
      assert(bufmgr->aux_map_ctx);
   }

   iris_init_border_color_pool(bufmgr, &bufmgr->border_color_pool);

   return bufmgr;

error_slabs_init:
   for (unsigned i = 0; i < NUM_SLAB_ALLOCATORS; i++) {
      if (!bufmgr->bo_slabs[i].groups)
         break;

      pb_slabs_deinit(&bufmgr->bo_slabs[i]);
   }
   iris_bufmgr_destroy_global_vm(bufmgr);
error_init_vm:
error_engine_info:
   close(bufmgr->fd);
error_dup:
   free(bufmgr);
   return NULL;
}
static struct iris_bufmgr *
iris_bufmgr_ref(struct iris_bufmgr *bufmgr)
{
   p_atomic_inc(&bufmgr->refcount);
   return bufmgr;
}
void
iris_bufmgr_unref(struct iris_bufmgr *bufmgr)
{
   simple_mtx_lock(&global_bufmgr_list_mutex);
   if (p_atomic_dec_zero(&bufmgr->refcount)) {
      list_del(&bufmgr->link);
      iris_bufmgr_destroy(bufmgr);
   }
   simple_mtx_unlock(&global_bufmgr_list_mutex);
}
/** Returns a new unique id, to be used by screens. */
int
iris_bufmgr_create_screen_id(struct iris_bufmgr *bufmgr)
{
   return p_atomic_inc_return(&bufmgr->next_screen_id) - 1;
}
/**
 * Gets an already existing GEM buffer manager or creates a new one.
 *
 * \param fd File descriptor of the opened DRM device.
 */
struct iris_bufmgr *
iris_bufmgr_get_for_fd(int fd, bool bo_reuse)
{
   struct intel_device_info devinfo;
   struct stat st;

   if (fstat(fd, &st))
      return NULL;

   struct iris_bufmgr *bufmgr = NULL;

   simple_mtx_lock(&global_bufmgr_list_mutex);
   list_for_each_entry(struct iris_bufmgr, iter_bufmgr, &global_bufmgr_list, link) {
      struct stat iter_st;
      if (fstat(iter_bufmgr->fd, &iter_st))
         continue;

      if (st.st_rdev == iter_st.st_rdev) {
         assert(iter_bufmgr->bo_reuse == bo_reuse);
         bufmgr = iris_bufmgr_ref(iter_bufmgr);
         goto unlock;
      }
   }

   if (!intel_get_device_info_from_fd(fd, &devinfo))
      goto unlock;

   if (devinfo.ver < 8 || devinfo.platform == INTEL_PLATFORM_CHV)
      goto unlock;

   bufmgr = iris_bufmgr_create(&devinfo, fd, bo_reuse);
   if (bufmgr)
      list_addtail(&bufmgr->link, &global_bufmgr_list);

unlock:
   simple_mtx_unlock(&global_bufmgr_list_mutex);

   return bufmgr;
}
int
iris_bufmgr_get_fd(struct iris_bufmgr *bufmgr)
{
   return bufmgr->fd;
}

void *
iris_bufmgr_get_aux_map_context(struct iris_bufmgr *bufmgr)
{
   return bufmgr->aux_map_ctx;
}

simple_mtx_t *
iris_bufmgr_get_bo_deps_lock(struct iris_bufmgr *bufmgr)
{
   return &bufmgr->bo_deps_lock;
}

struct iris_border_color_pool *
iris_bufmgr_get_border_color_pool(struct iris_bufmgr *bufmgr)
{
   return &bufmgr->border_color_pool;
}

uint64_t
iris_bufmgr_vram_size(struct iris_bufmgr *bufmgr)
{
   return bufmgr->vram.size;
}

uint64_t
iris_bufmgr_sram_size(struct iris_bufmgr *bufmgr)
{
   return bufmgr->sys.size;
}

const struct intel_device_info *
iris_bufmgr_get_device_info(struct iris_bufmgr *bufmgr)
{
   return &bufmgr->devinfo;
}

const struct iris_kmd_backend *
iris_bufmgr_get_kernel_driver_backend(struct iris_bufmgr *bufmgr)
{
   return bufmgr->kmd_backend;
}

uint32_t
iris_bufmgr_get_global_vm_id(struct iris_bufmgr *bufmgr)
{
   return bufmgr->global_vm_id;
}

bool
iris_bufmgr_use_global_vm_id(struct iris_bufmgr *bufmgr)
{
   return bufmgr->use_global_vm;
}