1 /**************************************************************************
3 * Copyright © 2007 Red Hat Inc.
4 * Copyright © 2007 Intel Corporation
5 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions
29 **************************************************************************/
31 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
32 * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
33 * Eric Anholt <eric@anholt.net>
34 * Dave Airlie <airlied@linux.ie>
42 #include <xf86atomic.h>
50 #include <sys/ioctl.h>
53 #include <sys/types.h>
57 #include "libdrm_lists.h"
58 #include "intel_bufmgr.h"
59 #include "intel_bufmgr_priv.h"
60 #include "intel_chipset.h"
65 #define DBG(...) do { \
66 if (bufmgr_gem->bufmgr.debug) \
67 fprintf(stderr, __VA_ARGS__); \
70 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
72 typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
74 struct drm_intel_gem_bo_bucket {
79 typedef struct _drm_intel_bufmgr_gem {
80 drm_intel_bufmgr bufmgr;
88 struct drm_i915_gem_exec_object *exec_objects;
89 struct drm_i915_gem_exec_object2 *exec2_objects;
90 drm_intel_bo **exec_bos;
94 /** Array of lists of cached gem objects of power-of-two sizes */
95 struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
100 drmMMListHead vma_cache;
101 int vma_count, vma_open, vma_max;
104 int available_fences;
107 unsigned int has_bsd : 1;
108 unsigned int has_blt : 1;
109 unsigned int has_relaxed_fencing : 1;
110 unsigned int bo_reuse : 1;
112 } drm_intel_bufmgr_gem;
114 #define DRM_INTEL_RELOC_FENCE (1<<0)
116 typedef struct _drm_intel_reloc_target_info {
119 } drm_intel_reloc_target;
121 struct _drm_intel_bo_gem {
129 * Kenel-assigned global name for this object
131 unsigned int global_name;
132 drmMMListHead name_list;
135 * Index of the buffer within the validation list while preparing a
136 * batchbuffer execution.
141 * Current tiling mode
143 uint32_t tiling_mode;
144 uint32_t swizzle_mode;
145 unsigned long stride;
149 /** Array passed to the DRM containing relocation information. */
150 struct drm_i915_gem_relocation_entry *relocs;
152 * Array of info structs corresponding to relocs[i].target_handle etc
154 drm_intel_reloc_target *reloc_target_info;
155 /** Number of entries in relocs */
157 /** Mapped address for the buffer, saved across map/unmap cycles */
159 /** GTT virtual address for the buffer, saved across map/unmap cycles */
162 drmMMListHead vma_list;
168 * Boolean of whether this BO and its children have been included in
169 * the current drm_intel_bufmgr_check_aperture_space() total.
171 bool included_in_check_aperture;
174 * Boolean of whether this buffer has been used as a relocation
175 * target and had its size accounted for, and thus can't have any
176 * further relocations added to it.
178 bool used_as_reloc_target;
181 * Boolean of whether we have encountered an error whilst building the relocation tree.
186 * Boolean of whether this buffer can be re-used
191 * Size in bytes of this buffer and its relocation descendents.
193 * Used to avoid costly tree walking in
194 * drm_intel_bufmgr_check_aperture in the common case.
199 * Number of potential fence registers required by this buffer and its
202 int reloc_tree_fences;
204 /** Flags that we may need to do the SW_FINSIH ioctl on unmap. */
205 bool mapped_cpu_write;
209 drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);
212 drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);
215 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
216 uint32_t * swizzle_mode);
219 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
220 uint32_t tiling_mode,
223 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
226 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);
228 static void drm_intel_gem_bo_free(drm_intel_bo *bo);
231 drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
232 uint32_t *tiling_mode)
234 unsigned long min_size, max_size;
237 if (*tiling_mode == I915_TILING_NONE)
240 /* 965+ just need multiples of page size for tiling */
241 if (bufmgr_gem->gen >= 4)
242 return ROUND_UP_TO(size, 4096);
244 /* Older chips need powers of two, of at least 512k or 1M */
245 if (bufmgr_gem->gen == 3) {
246 min_size = 1024*1024;
247 max_size = 128*1024*1024;
250 max_size = 64*1024*1024;
253 if (size > max_size) {
254 *tiling_mode = I915_TILING_NONE;
258 /* Do we need to allocate every page for the fence? */
259 if (bufmgr_gem->has_relaxed_fencing)
260 return ROUND_UP_TO(size, 4096);
262 for (i = min_size; i < size; i <<= 1)
269 * Round a given pitch up to the minimum required for X tiling on a
270 * given chip. We use 512 as the minimum to allow for a later tiling
274 drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
275 unsigned long pitch, uint32_t *tiling_mode)
277 unsigned long tile_width;
280 /* If untiled, then just align it so that we can do rendering
281 * to it with the 3D engine.
283 if (*tiling_mode == I915_TILING_NONE)
284 return ALIGN(pitch, 64);
286 if (*tiling_mode == I915_TILING_X
287 || (IS_915(bufmgr_gem) && *tiling_mode == I915_TILING_Y))
292 /* 965 is flexible */
293 if (bufmgr_gem->gen >= 4)
294 return ROUND_UP_TO(pitch, tile_width);
296 /* The older hardware has a maximum pitch of 8192 with tiled
297 * surfaces, so fallback to untiled if it's too large.
300 *tiling_mode = I915_TILING_NONE;
301 return ALIGN(pitch, 64);
304 /* Pre-965 needs power of two tile width */
305 for (i = tile_width; i < pitch; i <<= 1)
311 static struct drm_intel_gem_bo_bucket *
312 drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
317 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
318 struct drm_intel_gem_bo_bucket *bucket =
319 &bufmgr_gem->cache_bucket[i];
320 if (bucket->size >= size) {
329 drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
333 for (i = 0; i < bufmgr_gem->exec_count; i++) {
334 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
335 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
337 if (bo_gem->relocs == NULL) {
338 DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
343 for (j = 0; j < bo_gem->reloc_count; j++) {
344 drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
345 drm_intel_bo_gem *target_gem =
346 (drm_intel_bo_gem *) target_bo;
348 DBG("%2d: %d (%s)@0x%08llx -> "
349 "%d (%s)@0x%08lx + 0x%08x\n",
351 bo_gem->gem_handle, bo_gem->name,
352 (unsigned long long)bo_gem->relocs[j].offset,
353 target_gem->gem_handle,
356 bo_gem->relocs[j].delta);
362 drm_intel_gem_bo_reference(drm_intel_bo *bo)
364 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
366 atomic_inc(&bo_gem->refcount);
370 * Adds the given buffer to the list of buffers to be validated (moved into the
371 * appropriate memory type) with the next batch submission.
373 * If a buffer is validated multiple times in a batch submission, it ends up
374 * with the intersection of the memory type flags and the union of the
378 drm_intel_add_validate_buffer(drm_intel_bo *bo)
380 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
381 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
384 if (bo_gem->validate_index != -1)
387 /* Extend the array of validation entries as necessary. */
388 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
389 int new_size = bufmgr_gem->exec_size * 2;
394 bufmgr_gem->exec_objects =
395 realloc(bufmgr_gem->exec_objects,
396 sizeof(*bufmgr_gem->exec_objects) * new_size);
397 bufmgr_gem->exec_bos =
398 realloc(bufmgr_gem->exec_bos,
399 sizeof(*bufmgr_gem->exec_bos) * new_size);
400 bufmgr_gem->exec_size = new_size;
403 index = bufmgr_gem->exec_count;
404 bo_gem->validate_index = index;
405 /* Fill in array entry */
406 bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
407 bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
408 bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
409 bufmgr_gem->exec_objects[index].alignment = 0;
410 bufmgr_gem->exec_objects[index].offset = 0;
411 bufmgr_gem->exec_bos[index] = bo;
412 bufmgr_gem->exec_count++;
416 drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
418 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
419 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
422 if (bo_gem->validate_index != -1) {
424 bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |=
425 EXEC_OBJECT_NEEDS_FENCE;
429 /* Extend the array of validation entries as necessary. */
430 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
431 int new_size = bufmgr_gem->exec_size * 2;
436 bufmgr_gem->exec2_objects =
437 realloc(bufmgr_gem->exec2_objects,
438 sizeof(*bufmgr_gem->exec2_objects) * new_size);
439 bufmgr_gem->exec_bos =
440 realloc(bufmgr_gem->exec_bos,
441 sizeof(*bufmgr_gem->exec_bos) * new_size);
442 bufmgr_gem->exec_size = new_size;
445 index = bufmgr_gem->exec_count;
446 bo_gem->validate_index = index;
447 /* Fill in array entry */
448 bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
449 bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
450 bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
451 bufmgr_gem->exec2_objects[index].alignment = 0;
452 bufmgr_gem->exec2_objects[index].offset = 0;
453 bufmgr_gem->exec_bos[index] = bo;
454 bufmgr_gem->exec2_objects[index].flags = 0;
455 bufmgr_gem->exec2_objects[index].rsvd1 = 0;
456 bufmgr_gem->exec2_objects[index].rsvd2 = 0;
458 bufmgr_gem->exec2_objects[index].flags |=
459 EXEC_OBJECT_NEEDS_FENCE;
461 bufmgr_gem->exec_count++;
464 #define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
468 drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
469 drm_intel_bo_gem *bo_gem)
473 assert(!bo_gem->used_as_reloc_target);
475 /* The older chipsets are far-less flexible in terms of tiling,
476 * and require tiled buffer to be size aligned in the aperture.
477 * This means that in the worst possible case we will need a hole
478 * twice as large as the object in order for it to fit into the
479 * aperture. Optimal packing is for wimps.
481 size = bo_gem->bo.size;
482 if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
485 if (bufmgr_gem->has_relaxed_fencing) {
486 if (bufmgr_gem->gen == 3)
487 min_size = 1024*1024;
491 while (min_size < size)
496 /* Account for worst-case alignment. */
500 bo_gem->reloc_tree_size = size;
504 drm_intel_setup_reloc_list(drm_intel_bo *bo)
506 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
507 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
508 unsigned int max_relocs = bufmgr_gem->max_relocs;
510 if (bo->size / 4 < max_relocs)
511 max_relocs = bo->size / 4;
513 bo_gem->relocs = malloc(max_relocs *
514 sizeof(struct drm_i915_gem_relocation_entry));
515 bo_gem->reloc_target_info = malloc(max_relocs *
516 sizeof(drm_intel_reloc_target));
517 if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
518 bo_gem->has_error = true;
520 free (bo_gem->relocs);
521 bo_gem->relocs = NULL;
523 free (bo_gem->reloc_target_info);
524 bo_gem->reloc_target_info = NULL;
533 drm_intel_gem_bo_busy(drm_intel_bo *bo)
535 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
536 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
537 struct drm_i915_gem_busy busy;
540 memset(&busy, 0, sizeof(busy));
541 busy.handle = bo_gem->gem_handle;
543 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
545 return (ret == 0 && busy.busy);
549 drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
550 drm_intel_bo_gem *bo_gem, int state)
552 struct drm_i915_gem_madvise madv;
554 madv.handle = bo_gem->gem_handle;
557 drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
559 return madv.retained;
563 drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
565 return drm_intel_gem_bo_madvise_internal
566 ((drm_intel_bufmgr_gem *) bo->bufmgr,
567 (drm_intel_bo_gem *) bo,
571 /* drop the oldest entries that have been purged by the kernel */
573 drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
574 struct drm_intel_gem_bo_bucket *bucket)
576 while (!DRMLISTEMPTY(&bucket->head)) {
577 drm_intel_bo_gem *bo_gem;
579 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
580 bucket->head.next, head);
581 if (drm_intel_gem_bo_madvise_internal
582 (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
585 DRMLISTDEL(&bo_gem->head);
586 drm_intel_gem_bo_free(&bo_gem->bo);
590 static drm_intel_bo *
591 drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
595 uint32_t tiling_mode,
596 unsigned long stride)
598 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
599 drm_intel_bo_gem *bo_gem;
600 unsigned int page_size = getpagesize();
602 struct drm_intel_gem_bo_bucket *bucket;
603 bool alloc_from_cache;
604 unsigned long bo_size;
605 bool for_render = false;
607 if (flags & BO_ALLOC_FOR_RENDER)
610 /* Round the allocated size up to a power of two number of pages. */
611 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
613 /* If we don't have caching at this size, don't actually round the
616 if (bucket == NULL) {
618 if (bo_size < page_size)
621 bo_size = bucket->size;
624 pthread_mutex_lock(&bufmgr_gem->lock);
625 /* Get a buffer out of the cache if available */
627 alloc_from_cache = false;
628 if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
630 /* Allocate new render-target BOs from the tail (MRU)
631 * of the list, as it will likely be hot in the GPU
632 * cache and in the aperture for us.
634 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
635 bucket->head.prev, head);
636 DRMLISTDEL(&bo_gem->head);
637 alloc_from_cache = true;
639 /* For non-render-target BOs (where we're probably
640 * going to map it first thing in order to fill it
641 * with data), check if the last BO in the cache is
642 * unbusy, and only reuse in that case. Otherwise,
643 * allocating a new buffer is probably faster than
644 * waiting for the GPU to finish.
646 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
647 bucket->head.next, head);
648 if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
649 alloc_from_cache = true;
650 DRMLISTDEL(&bo_gem->head);
654 if (alloc_from_cache) {
655 if (!drm_intel_gem_bo_madvise_internal
656 (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
657 drm_intel_gem_bo_free(&bo_gem->bo);
658 drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
663 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
666 drm_intel_gem_bo_free(&bo_gem->bo);
671 pthread_mutex_unlock(&bufmgr_gem->lock);
673 if (!alloc_from_cache) {
674 struct drm_i915_gem_create create;
676 bo_gem = calloc(1, sizeof(*bo_gem));
680 bo_gem->bo.size = bo_size;
681 memset(&create, 0, sizeof(create));
682 create.size = bo_size;
684 ret = drmIoctl(bufmgr_gem->fd,
685 DRM_IOCTL_I915_GEM_CREATE,
687 bo_gem->gem_handle = create.handle;
688 bo_gem->bo.handle = bo_gem->gem_handle;
693 bo_gem->bo.bufmgr = bufmgr;
695 bo_gem->tiling_mode = I915_TILING_NONE;
696 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
699 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
702 drm_intel_gem_bo_free(&bo_gem->bo);
706 DRMINITLISTHEAD(&bo_gem->name_list);
707 DRMINITLISTHEAD(&bo_gem->vma_list);
711 atomic_set(&bo_gem->refcount, 1);
712 bo_gem->validate_index = -1;
713 bo_gem->reloc_tree_fences = 0;
714 bo_gem->used_as_reloc_target = false;
715 bo_gem->has_error = false;
716 bo_gem->reusable = true;
718 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
720 DBG("bo_create: buf %d (%s) %ldb\n",
721 bo_gem->gem_handle, bo_gem->name, size);
726 static drm_intel_bo *
727 drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
730 unsigned int alignment)
732 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
734 I915_TILING_NONE, 0);
737 static drm_intel_bo *
738 drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
741 unsigned int alignment)
743 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
744 I915_TILING_NONE, 0);
747 static drm_intel_bo *
748 drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
749 int x, int y, int cpp, uint32_t *tiling_mode,
750 unsigned long *pitch, unsigned long flags)
752 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
753 unsigned long size, stride;
757 unsigned long aligned_y, height_alignment;
759 tiling = *tiling_mode;
761 /* If we're tiled, our allocations are in 8 or 32-row blocks,
762 * so failure to align our height means that we won't allocate
765 * If we're untiled, we still have to align to 2 rows high
766 * because the data port accesses 2x2 blocks even if the
767 * bottom row isn't to be rendered, so failure to align means
768 * we could walk off the end of the GTT and fault. This is
769 * documented on 965, and may be the case on older chipsets
770 * too so we try to be careful.
773 height_alignment = 2;
775 if (IS_GEN2(bufmgr_gem) && tiling != I915_TILING_NONE)
776 height_alignment = 16;
777 else if (tiling == I915_TILING_X
778 || (IS_915(bufmgr_gem) && tiling == I915_TILING_Y))
779 height_alignment = 8;
780 else if (tiling == I915_TILING_Y)
781 height_alignment = 32;
782 aligned_y = ALIGN(y, height_alignment);
785 stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
786 size = stride * aligned_y;
787 size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
788 } while (*tiling_mode != tiling);
791 if (tiling == I915_TILING_NONE)
794 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
799 * Returns a drm_intel_bo wrapping the given buffer object handle.
801 * This can be used when one application needs to pass a buffer object
805 drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
809 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
810 drm_intel_bo_gem *bo_gem;
812 struct drm_gem_open open_arg;
813 struct drm_i915_gem_get_tiling get_tiling;
816 /* At the moment most applications only have a few named bo.
817 * For instance, in a DRI client only the render buffers passed
818 * between X and the client are named. And since X returns the
819 * alternating names for the front/back buffer a linear search
820 * provides a sufficiently fast match.
822 for (list = bufmgr_gem->named.next;
823 list != &bufmgr_gem->named;
825 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
826 if (bo_gem->global_name == handle) {
827 drm_intel_gem_bo_reference(&bo_gem->bo);
832 bo_gem = calloc(1, sizeof(*bo_gem));
836 memset(&open_arg, 0, sizeof(open_arg));
837 open_arg.name = handle;
838 ret = drmIoctl(bufmgr_gem->fd,
842 DBG("Couldn't reference %s handle 0x%08x: %s\n",
843 name, handle, strerror(errno));
847 bo_gem->bo.size = open_arg.size;
848 bo_gem->bo.offset = 0;
849 bo_gem->bo.virtual = NULL;
850 bo_gem->bo.bufmgr = bufmgr;
852 atomic_set(&bo_gem->refcount, 1);
853 bo_gem->validate_index = -1;
854 bo_gem->gem_handle = open_arg.handle;
855 bo_gem->bo.handle = open_arg.handle;
856 bo_gem->global_name = handle;
857 bo_gem->reusable = false;
859 memset(&get_tiling, 0, sizeof(get_tiling));
860 get_tiling.handle = bo_gem->gem_handle;
861 ret = drmIoctl(bufmgr_gem->fd,
862 DRM_IOCTL_I915_GEM_GET_TILING,
865 drm_intel_gem_bo_unreference(&bo_gem->bo);
868 bo_gem->tiling_mode = get_tiling.tiling_mode;
869 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
870 /* XXX stride is unknown */
871 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
873 DRMINITLISTHEAD(&bo_gem->vma_list);
874 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
875 DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
881 drm_intel_gem_bo_free(drm_intel_bo *bo)
883 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
884 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
885 struct drm_gem_close close;
888 DRMLISTDEL(&bo_gem->vma_list);
889 if (bo_gem->mem_virtual) {
890 munmap(bo_gem->mem_virtual, bo_gem->bo.size);
891 bufmgr_gem->vma_count--;
893 if (bo_gem->gtt_virtual) {
894 munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
895 bufmgr_gem->vma_count--;
898 /* Close this object */
899 memset(&close, 0, sizeof(close));
900 close.handle = bo_gem->gem_handle;
901 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
903 DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
904 bo_gem->gem_handle, bo_gem->name, strerror(errno));
909 /** Frees all cached buffers significantly older than @time. */
911 drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
915 if (bufmgr_gem->time == time)
918 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
919 struct drm_intel_gem_bo_bucket *bucket =
920 &bufmgr_gem->cache_bucket[i];
922 while (!DRMLISTEMPTY(&bucket->head)) {
923 drm_intel_bo_gem *bo_gem;
925 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
926 bucket->head.next, head);
927 if (time - bo_gem->free_time <= 1)
930 DRMLISTDEL(&bo_gem->head);
932 drm_intel_gem_bo_free(&bo_gem->bo);
936 bufmgr_gem->time = time;
939 static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
943 DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
944 bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);
946 if (bufmgr_gem->vma_max < 0)
949 /* We may need to evict a few entries in order to create new mmaps */
950 limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
954 while (bufmgr_gem->vma_count > limit) {
955 drm_intel_bo_gem *bo_gem;
957 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
958 bufmgr_gem->vma_cache.next,
960 assert(bo_gem->map_count == 0);
961 DRMLISTDEL(&bo_gem->vma_list);
963 if (bo_gem->mem_virtual) {
964 munmap(bo_gem->mem_virtual, bo_gem->bo.size);
965 bo_gem->mem_virtual = NULL;
966 bufmgr_gem->vma_count--;
968 if (bo_gem->gtt_virtual) {
969 munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
970 bo_gem->gtt_virtual = NULL;
971 bufmgr_gem->vma_count--;
976 static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
977 drm_intel_bo_gem *bo_gem)
979 bufmgr_gem->vma_open--;
980 DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
981 if (bo_gem->mem_virtual)
982 bufmgr_gem->vma_count++;
983 if (bo_gem->gtt_virtual)
984 bufmgr_gem->vma_count++;
985 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
988 static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
989 drm_intel_bo_gem *bo_gem)
991 bufmgr_gem->vma_open++;
992 DRMLISTDEL(&bo_gem->vma_list);
993 if (bo_gem->mem_virtual)
994 bufmgr_gem->vma_count--;
995 if (bo_gem->gtt_virtual)
996 bufmgr_gem->vma_count--;
997 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
1001 drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
1003 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1004 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1005 struct drm_intel_gem_bo_bucket *bucket;
1008 /* Unreference all the target buffers */
1009 for (i = 0; i < bo_gem->reloc_count; i++) {
1010 if (bo_gem->reloc_target_info[i].bo != bo) {
1011 drm_intel_gem_bo_unreference_locked_timed(bo_gem->
1012 reloc_target_info[i].bo,
1016 bo_gem->reloc_count = 0;
1017 bo_gem->used_as_reloc_target = false;
1019 DBG("bo_unreference final: %d (%s)\n",
1020 bo_gem->gem_handle, bo_gem->name);
1022 /* release memory associated with this object */
1023 if (bo_gem->reloc_target_info) {
1024 free(bo_gem->reloc_target_info);
1025 bo_gem->reloc_target_info = NULL;
1027 if (bo_gem->relocs) {
1028 free(bo_gem->relocs);
1029 bo_gem->relocs = NULL;
1032 /* Clear any left-over mappings */
1033 if (bo_gem->map_count) {
1034 DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
1035 bo_gem->map_count = 0;
1036 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1039 DRMLISTDEL(&bo_gem->name_list);
1041 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
1042 /* Put the buffer into our internal cache for reuse if we can. */
1043 if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
1044 drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
1045 I915_MADV_DONTNEED)) {
1046 bo_gem->free_time = time;
1048 bo_gem->name = NULL;
1049 bo_gem->validate_index = -1;
1051 DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
1053 drm_intel_gem_bo_free(bo);
1057 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
1060 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1062 assert(atomic_read(&bo_gem->refcount) > 0);
1063 if (atomic_dec_and_test(&bo_gem->refcount))
1064 drm_intel_gem_bo_unreference_final(bo, time);
1067 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
1069 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1071 assert(atomic_read(&bo_gem->refcount) > 0);
1072 if (atomic_dec_and_test(&bo_gem->refcount)) {
1073 drm_intel_bufmgr_gem *bufmgr_gem =
1074 (drm_intel_bufmgr_gem *) bo->bufmgr;
1075 struct timespec time;
1077 clock_gettime(CLOCK_MONOTONIC, &time);
1079 pthread_mutex_lock(&bufmgr_gem->lock);
1080 drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
1081 drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
1082 pthread_mutex_unlock(&bufmgr_gem->lock);
1086 static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
1088 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1089 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1090 struct drm_i915_gem_set_domain set_domain;
1093 pthread_mutex_lock(&bufmgr_gem->lock);
1095 if (bo_gem->map_count++ == 0)
1096 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
1098 if (!bo_gem->mem_virtual) {
1099 struct drm_i915_gem_mmap mmap_arg;
1101 DBG("bo_map: %d (%s), map_count=%d\n",
1102 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1104 memset(&mmap_arg, 0, sizeof(mmap_arg));
1105 mmap_arg.handle = bo_gem->gem_handle;
1106 mmap_arg.offset = 0;
1107 mmap_arg.size = bo->size;
1108 ret = drmIoctl(bufmgr_gem->fd,
1109 DRM_IOCTL_I915_GEM_MMAP,
1113 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1114 __FILE__, __LINE__, bo_gem->gem_handle,
1115 bo_gem->name, strerror(errno));
1116 if (--bo_gem->map_count == 0)
1117 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1118 pthread_mutex_unlock(&bufmgr_gem->lock);
1121 bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
1123 DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1124 bo_gem->mem_virtual);
1125 bo->virtual = bo_gem->mem_virtual;
1127 set_domain.handle = bo_gem->gem_handle;
1128 set_domain.read_domains = I915_GEM_DOMAIN_CPU;
1130 set_domain.write_domain = I915_GEM_DOMAIN_CPU;
1132 set_domain.write_domain = 0;
1133 ret = drmIoctl(bufmgr_gem->fd,
1134 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1137 DBG("%s:%d: Error setting to CPU domain %d: %s\n",
1138 __FILE__, __LINE__, bo_gem->gem_handle,
1143 bo_gem->mapped_cpu_write = true;
1145 pthread_mutex_unlock(&bufmgr_gem->lock);
1150 int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
1152 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1153 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1154 struct drm_i915_gem_set_domain set_domain;
1157 pthread_mutex_lock(&bufmgr_gem->lock);
1159 if (bo_gem->map_count++ == 0)
1160 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
1162 /* Get a mapping of the buffer if we haven't before. */
1163 if (bo_gem->gtt_virtual == NULL) {
1164 struct drm_i915_gem_mmap_gtt mmap_arg;
1166 DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
1167 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1169 memset(&mmap_arg, 0, sizeof(mmap_arg));
1170 mmap_arg.handle = bo_gem->gem_handle;
1172 /* Get the fake offset back... */
1173 ret = drmIoctl(bufmgr_gem->fd,
1174 DRM_IOCTL_I915_GEM_MMAP_GTT,
1178 DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
1180 bo_gem->gem_handle, bo_gem->name,
1182 if (--bo_gem->map_count == 0)
1183 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1184 pthread_mutex_unlock(&bufmgr_gem->lock);
1189 bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
1190 MAP_SHARED, bufmgr_gem->fd,
1192 if (bo_gem->gtt_virtual == MAP_FAILED) {
1193 bo_gem->gtt_virtual = NULL;
1195 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1197 bo_gem->gem_handle, bo_gem->name,
1199 if (--bo_gem->map_count == 0)
1200 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1201 pthread_mutex_unlock(&bufmgr_gem->lock);
1206 bo->virtual = bo_gem->gtt_virtual;
1208 DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1209 bo_gem->gtt_virtual);
1211 /* Now move it to the GTT domain so that the CPU caches are flushed */
1212 set_domain.handle = bo_gem->gem_handle;
1213 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1214 set_domain.write_domain = I915_GEM_DOMAIN_GTT;
1215 ret = drmIoctl(bufmgr_gem->fd,
1216 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1219 DBG("%s:%d: Error setting domain %d: %s\n",
1220 __FILE__, __LINE__, bo_gem->gem_handle,
1224 pthread_mutex_unlock(&bufmgr_gem->lock);
1229 static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
1231 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1232 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1233 struct drm_i915_gem_sw_finish sw_finish;
1239 pthread_mutex_lock(&bufmgr_gem->lock);
1241 if (bo_gem->map_count <= 0) {
1242 DBG("attempted to unmap an unmapped bo\n");
1243 pthread_mutex_unlock(&bufmgr_gem->lock);
1244 /* Preserve the old behaviour of just treating this as a
1245 * no-op rather than reporting the error.
1250 if (bo_gem->mapped_cpu_write) {
1251 /* Cause a flush to happen if the buffer's pinned for
1252 * scanout, so the results show up in a timely manner.
1253 * Unlike GTT set domains, this only does work if the
1254 * buffer should be scanout-related.
1256 sw_finish.handle = bo_gem->gem_handle;
1257 ret = drmIoctl(bufmgr_gem->fd,
1258 DRM_IOCTL_I915_GEM_SW_FINISH,
1260 ret = ret == -1 ? -errno : 0;
1262 bo_gem->mapped_cpu_write = false;
1265 /* We need to unmap after every innovation as we cannot track
1266 * an open vma for every bo as that will exhaasut the system
1267 * limits and cause later failures.
1269 if (--bo_gem->map_count == 0) {
1270 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1273 pthread_mutex_unlock(&bufmgr_gem->lock);
1278 int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
1280 return drm_intel_gem_bo_unmap(bo);
1284 drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
1285 unsigned long size, const void *data)
1287 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1288 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1289 struct drm_i915_gem_pwrite pwrite;
1292 memset(&pwrite, 0, sizeof(pwrite));
1293 pwrite.handle = bo_gem->gem_handle;
1294 pwrite.offset = offset;
1296 pwrite.data_ptr = (uint64_t) (uintptr_t) data;
1297 ret = drmIoctl(bufmgr_gem->fd,
1298 DRM_IOCTL_I915_GEM_PWRITE,
1302 DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
1303 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1304 (int)size, strerror(errno));
1311 drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
1313 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1314 struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
1317 get_pipe_from_crtc_id.crtc_id = crtc_id;
1318 ret = drmIoctl(bufmgr_gem->fd,
1319 DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
1320 &get_pipe_from_crtc_id);
1322 /* We return -1 here to signal that we don't
1323 * know which pipe is associated with this crtc.
1324 * This lets the caller know that this information
1325 * isn't available; using the wrong pipe for
1326 * vblank waiting can cause the chipset to lock up
1331 return get_pipe_from_crtc_id.pipe;
1335 drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
1336 unsigned long size, void *data)
1338 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1339 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1340 struct drm_i915_gem_pread pread;
1343 memset(&pread, 0, sizeof(pread));
1344 pread.handle = bo_gem->gem_handle;
1345 pread.offset = offset;
1347 pread.data_ptr = (uint64_t) (uintptr_t) data;
1348 ret = drmIoctl(bufmgr_gem->fd,
1349 DRM_IOCTL_I915_GEM_PREAD,
1353 DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
1354 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1355 (int)size, strerror(errno));
1361 /** Waits for all GPU rendering with the object to have completed. */
1363 drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
1365 drm_intel_gem_bo_start_gtt_access(bo, 1);
1369 * Sets the object to the GTT read and possibly write domain, used by the X
1370 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
1372 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
1373 * can do tiled pixmaps this way.
1376 drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
1378 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1379 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1380 struct drm_i915_gem_set_domain set_domain;
1383 set_domain.handle = bo_gem->gem_handle;
1384 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1385 set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
1386 ret = drmIoctl(bufmgr_gem->fd,
1387 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1390 DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
1391 __FILE__, __LINE__, bo_gem->gem_handle,
1392 set_domain.read_domains, set_domain.write_domain,
1398 drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
1400 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1403 free(bufmgr_gem->exec2_objects);
1404 free(bufmgr_gem->exec_objects);
1405 free(bufmgr_gem->exec_bos);
1407 pthread_mutex_destroy(&bufmgr_gem->lock);
1409 /* Free any cached buffer objects we were going to reuse */
1410 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
1411 struct drm_intel_gem_bo_bucket *bucket =
1412 &bufmgr_gem->cache_bucket[i];
1413 drm_intel_bo_gem *bo_gem;
1415 while (!DRMLISTEMPTY(&bucket->head)) {
1416 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1417 bucket->head.next, head);
1418 DRMLISTDEL(&bo_gem->head);
1420 drm_intel_gem_bo_free(&bo_gem->bo);
1428 * Adds the target buffer to the validation list and adds the relocation
1429 * to the reloc_buffer's relocation list.
1431 * The relocation entry at the given offset must already contain the
1432 * precomputed relocation value, because the kernel will optimize out
1433 * the relocation entry write when the buffer hasn't moved from the
1434 * last known offset in target_bo.
1437 do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1438 drm_intel_bo *target_bo, uint32_t target_offset,
1439 uint32_t read_domains, uint32_t write_domain,
1442 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1443 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1444 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
1445 bool fenced_command;
1447 if (bo_gem->has_error)
1450 if (target_bo_gem->has_error) {
1451 bo_gem->has_error = true;
1455 /* We never use HW fences for rendering on 965+ */
1456 if (bufmgr_gem->gen >= 4)
1459 fenced_command = need_fence;
1460 if (target_bo_gem->tiling_mode == I915_TILING_NONE)
1463 /* Create a new relocation list if needed */
1464 if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
1467 /* Check overflow */
1468 assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
1471 assert(offset <= bo->size - 4);
1472 assert((write_domain & (write_domain - 1)) == 0);
1474 /* Make sure that we're not adding a reloc to something whose size has
1475 * already been accounted for.
1477 assert(!bo_gem->used_as_reloc_target);
1478 if (target_bo_gem != bo_gem) {
1479 target_bo_gem->used_as_reloc_target = true;
1480 bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
1482 /* An object needing a fence is a tiled buffer, so it won't have
1483 * relocs to other buffers.
1486 target_bo_gem->reloc_tree_fences = 1;
1487 bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
1489 bo_gem->relocs[bo_gem->reloc_count].offset = offset;
1490 bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
1491 bo_gem->relocs[bo_gem->reloc_count].target_handle =
1492 target_bo_gem->gem_handle;
1493 bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
1494 bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
1495 bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
1497 bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
1498 if (target_bo != bo)
1499 drm_intel_gem_bo_reference(target_bo);
1501 bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
1502 DRM_INTEL_RELOC_FENCE;
1504 bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
1506 bo_gem->reloc_count++;
1512 drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1513 drm_intel_bo *target_bo, uint32_t target_offset,
1514 uint32_t read_domains, uint32_t write_domain)
1516 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
1518 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1519 read_domains, write_domain,
1520 !bufmgr_gem->fenced_relocs);
1524 drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
1525 drm_intel_bo *target_bo,
1526 uint32_t target_offset,
1527 uint32_t read_domains, uint32_t write_domain)
1529 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1530 read_domains, write_domain, true);
1534 drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
1536 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1538 return bo_gem->reloc_count;
1542 * Removes existing relocation entries in the BO after "start".
1544 * This allows a user to avoid a two-step process for state setup with
1545 * counting up all the buffer objects and doing a
1546 * drm_intel_bufmgr_check_aperture_space() before emitting any of the
1547 * relocations for the state setup. Instead, save the state of the
1548 * batchbuffer including drm_intel_gem_get_reloc_count(), emit all the
1549 * state, and then check if it still fits in the aperture.
1551 * Any further drm_intel_bufmgr_check_aperture_space() queries
1552 * involving this buffer in the tree are undefined after this call.
1555 drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
1557 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1559 struct timespec time;
1561 clock_gettime(CLOCK_MONOTONIC, &time);
1563 assert(bo_gem->reloc_count >= start);
1564 /* Unreference the cleared target buffers */
1565 for (i = start; i < bo_gem->reloc_count; i++) {
1566 if (bo_gem->reloc_target_info[i].bo != bo) {
1567 drm_intel_gem_bo_unreference_locked_timed(bo_gem->
1568 reloc_target_info[i].bo,
1572 bo_gem->reloc_count = start;
1576 * Walk the tree of relocations rooted at BO and accumulate the list of
1577 * validations to be performed and update the relocation buffers with
1578 * index values into the validation list.
1581 drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
1583 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1586 if (bo_gem->relocs == NULL)
1589 for (i = 0; i < bo_gem->reloc_count; i++) {
1590 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
1592 if (target_bo == bo)
1595 /* Continue walking the tree depth-first. */
1596 drm_intel_gem_bo_process_reloc(target_bo);
1598 /* Add the target to the validate list */
1599 drm_intel_add_validate_buffer(target_bo);
1604 drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
1606 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1609 if (bo_gem->relocs == NULL)
1612 for (i = 0; i < bo_gem->reloc_count; i++) {
1613 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
1616 if (target_bo == bo)
1619 /* Continue walking the tree depth-first. */
1620 drm_intel_gem_bo_process_reloc2(target_bo);
1622 need_fence = (bo_gem->reloc_target_info[i].flags &
1623 DRM_INTEL_RELOC_FENCE);
1625 /* Add the target to the validate list */
1626 drm_intel_add_validate_buffer2(target_bo, need_fence);
1632 drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
1636 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1637 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1638 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1640 /* Update the buffer offset */
1641 if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
1642 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1643 bo_gem->gem_handle, bo_gem->name, bo->offset,
1644 (unsigned long long)bufmgr_gem->exec_objects[i].
1646 bo->offset = bufmgr_gem->exec_objects[i].offset;
1652 drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
1656 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1657 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1658 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1660 /* Update the buffer offset */
1661 if (bufmgr_gem->exec2_objects[i].offset != bo->offset) {
1662 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1663 bo_gem->gem_handle, bo_gem->name, bo->offset,
1664 (unsigned long long)bufmgr_gem->exec2_objects[i].offset);
1665 bo->offset = bufmgr_gem->exec2_objects[i].offset;
1671 drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
1672 drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
1674 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1675 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1676 struct drm_i915_gem_execbuffer execbuf;
1679 if (bo_gem->has_error)
1682 pthread_mutex_lock(&bufmgr_gem->lock);
1683 /* Update indices and set up the validate list. */
1684 drm_intel_gem_bo_process_reloc(bo);
1686 /* Add the batch buffer to the validation list. There are no
1687 * relocations pointing to it.
1689 drm_intel_add_validate_buffer(bo);
1691 execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
1692 execbuf.buffer_count = bufmgr_gem->exec_count;
1693 execbuf.batch_start_offset = 0;
1694 execbuf.batch_len = used;
1695 execbuf.cliprects_ptr = (uintptr_t) cliprects;
1696 execbuf.num_cliprects = num_cliprects;
1700 ret = drmIoctl(bufmgr_gem->fd,
1701 DRM_IOCTL_I915_GEM_EXECBUFFER,
1705 if (errno == ENOSPC) {
1706 DBG("Execbuffer fails to pin. "
1707 "Estimate: %u. Actual: %u. Available: %u\n",
1708 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
1711 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
1714 (unsigned int)bufmgr_gem->gtt_size);
1717 drm_intel_update_buffer_offsets(bufmgr_gem);
1719 if (bufmgr_gem->bufmgr.debug)
1720 drm_intel_gem_dump_validation_list(bufmgr_gem);
1722 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1723 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1724 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1726 /* Disconnect the buffer from the validate list */
1727 bo_gem->validate_index = -1;
1728 bufmgr_gem->exec_bos[i] = NULL;
1730 bufmgr_gem->exec_count = 0;
1731 pthread_mutex_unlock(&bufmgr_gem->lock);
1737 drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
1738 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
1741 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
1742 struct drm_i915_gem_execbuffer2 execbuf;
1745 switch (flags & 0x7) {
1749 if (!bufmgr_gem->has_blt)
1753 if (!bufmgr_gem->has_bsd)
1756 case I915_EXEC_RENDER:
1757 case I915_EXEC_DEFAULT:
1761 pthread_mutex_lock(&bufmgr_gem->lock);
1762 /* Update indices and set up the validate list. */
1763 drm_intel_gem_bo_process_reloc2(bo);
1765 /* Add the batch buffer to the validation list. There are no relocations
1768 drm_intel_add_validate_buffer2(bo, 0);
1770 execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
1771 execbuf.buffer_count = bufmgr_gem->exec_count;
1772 execbuf.batch_start_offset = 0;
1773 execbuf.batch_len = used;
1774 execbuf.cliprects_ptr = (uintptr_t)cliprects;
1775 execbuf.num_cliprects = num_cliprects;
1778 execbuf.flags = flags;
1782 ret = drmIoctl(bufmgr_gem->fd,
1783 DRM_IOCTL_I915_GEM_EXECBUFFER2,
1787 if (ret == -ENOSPC) {
1788 DBG("Execbuffer fails to pin. "
1789 "Estimate: %u. Actual: %u. Available: %u\n",
1790 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
1791 bufmgr_gem->exec_count),
1792 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
1793 bufmgr_gem->exec_count),
1794 (unsigned int) bufmgr_gem->gtt_size);
1797 drm_intel_update_buffer_offsets2(bufmgr_gem);
1799 if (bufmgr_gem->bufmgr.debug)
1800 drm_intel_gem_dump_validation_list(bufmgr_gem);
1802 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1803 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1804 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1806 /* Disconnect the buffer from the validate list */
1807 bo_gem->validate_index = -1;
1808 bufmgr_gem->exec_bos[i] = NULL;
1810 bufmgr_gem->exec_count = 0;
1811 pthread_mutex_unlock(&bufmgr_gem->lock);
1817 drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
1818 drm_clip_rect_t *cliprects, int num_cliprects,
1821 return drm_intel_gem_bo_mrb_exec2(bo, used,
1822 cliprects, num_cliprects, DR4,
1827 drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
1829 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1830 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1831 struct drm_i915_gem_pin pin;
1834 memset(&pin, 0, sizeof(pin));
1835 pin.handle = bo_gem->gem_handle;
1836 pin.alignment = alignment;
1838 ret = drmIoctl(bufmgr_gem->fd,
1839 DRM_IOCTL_I915_GEM_PIN,
1844 bo->offset = pin.offset;
1849 drm_intel_gem_bo_unpin(drm_intel_bo *bo)
1851 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1852 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1853 struct drm_i915_gem_unpin unpin;
1856 memset(&unpin, 0, sizeof(unpin));
1857 unpin.handle = bo_gem->gem_handle;
1859 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
1867 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
1868 uint32_t tiling_mode,
1871 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1872 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1873 struct drm_i915_gem_set_tiling set_tiling;
1876 if (bo_gem->global_name == 0 &&
1877 tiling_mode == bo_gem->tiling_mode &&
1878 stride == bo_gem->stride)
1881 memset(&set_tiling, 0, sizeof(set_tiling));
1883 /* set_tiling is slightly broken and overwrites the
1884 * input on the error path, so we have to open code
1887 set_tiling.handle = bo_gem->gem_handle;
1888 set_tiling.tiling_mode = tiling_mode;
1889 set_tiling.stride = stride;
1891 ret = ioctl(bufmgr_gem->fd,
1892 DRM_IOCTL_I915_GEM_SET_TILING,
1894 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
1898 bo_gem->tiling_mode = set_tiling.tiling_mode;
1899 bo_gem->swizzle_mode = set_tiling.swizzle_mode;
1900 bo_gem->stride = set_tiling.stride;
1905 drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
1908 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1909 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1912 /* Linear buffers have no stride. By ensuring that we only ever use
1913 * stride 0 with linear buffers, we simplify our code.
1915 if (*tiling_mode == I915_TILING_NONE)
1918 ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
1920 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
1922 *tiling_mode = bo_gem->tiling_mode;
1927 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
1928 uint32_t * swizzle_mode)
1930 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1932 *tiling_mode = bo_gem->tiling_mode;
1933 *swizzle_mode = bo_gem->swizzle_mode;
1938 drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
1940 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1941 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1942 struct drm_gem_flink flink;
1945 if (!bo_gem->global_name) {
1946 memset(&flink, 0, sizeof(flink));
1947 flink.handle = bo_gem->gem_handle;
1949 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
1952 bo_gem->global_name = flink.name;
1953 bo_gem->reusable = false;
1955 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
1958 *name = bo_gem->global_name;
1963 * Enables unlimited caching of buffer objects for reuse.
1965 * This is potentially very memory expensive, as the cache at each bucket
1966 * size is only bounded by how many buffers of that size we've managed to have
1967 * in flight at once.
1970 drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
1972 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1974 bufmgr_gem->bo_reuse = true;
1978 * Enable use of fenced reloc type.
1980 * New code should enable this to avoid unnecessary fence register
1981 * allocation. If this option is not enabled, all relocs will have fence
1982 * register allocated.
1985 drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
1987 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
1989 if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
1990 bufmgr_gem->fenced_relocs = true;
1994 * Return the additional aperture space required by the tree of buffer objects
1998 drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
2000 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2004 if (bo == NULL || bo_gem->included_in_check_aperture)
2008 bo_gem->included_in_check_aperture = true;
2010 for (i = 0; i < bo_gem->reloc_count; i++)
2012 drm_intel_gem_bo_get_aperture_space(bo_gem->
2013 reloc_target_info[i].bo);
2019 * Count the number of buffers in this list that need a fence reg
2021 * If the count is greater than the number of available regs, we'll have
2022 * to ask the caller to resubmit a batch with fewer tiled buffers.
2024 * This function over-counts if the same buffer is used multiple times.
2027 drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
2030 unsigned int total = 0;
2032 for (i = 0; i < count; i++) {
2033 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
2038 total += bo_gem->reloc_tree_fences;
2044 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
2045 * for the next drm_intel_bufmgr_check_aperture_space() call.
2048 drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
2050 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2053 if (bo == NULL || !bo_gem->included_in_check_aperture)
2056 bo_gem->included_in_check_aperture = false;
2058 for (i = 0; i < bo_gem->reloc_count; i++)
2059 drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
2060 reloc_target_info[i].bo);
2064 * Return a conservative estimate for the amount of aperture required
2065 * for a collection of buffers. This may double-count some buffers.
2068 drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
2071 unsigned int total = 0;
2073 for (i = 0; i < count; i++) {
2074 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
2076 total += bo_gem->reloc_tree_size;
2082 * Return the amount of aperture needed for a collection of buffers.
2083 * This avoids double counting any buffers, at the cost of looking
2084 * at every buffer in the set.
2087 drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
2090 unsigned int total = 0;
2092 for (i = 0; i < count; i++) {
2093 total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
2094 /* For the first buffer object in the array, we get an
2095 * accurate count back for its reloc_tree size (since nothing
2096 * had been flagged as being counted yet). We can save that
2097 * value out as a more conservative reloc_tree_size that
2098 * avoids double-counting target buffers. Since the first
2099 * buffer happens to usually be the batch buffer in our
2100 * callers, this can pull us back from doing the tree
2101 * walk on every new batch emit.
2104 drm_intel_bo_gem *bo_gem =
2105 (drm_intel_bo_gem *) bo_array[i];
2106 bo_gem->reloc_tree_size = total;
2110 for (i = 0; i < count; i++)
2111 drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
2116 * Return -1 if the batchbuffer should be flushed before attempting to
2117 * emit rendering referencing the buffers pointed to by bo_array.
2119 * This is required because if we try to emit a batchbuffer with relocations
2120 * to a tree of buffers that won't simultaneously fit in the aperture,
2121 * the rendering will return an error at a point where the software is not
2122 * prepared to recover from it.
2124 * However, we also want to emit the batchbuffer significantly before we reach
2125 * the limit, as a series of batchbuffers each of which references buffers
2126 * covering almost all of the aperture means that at each emit we end up
2127 * waiting to evict a buffer from the last rendering, and we get synchronous
2128 * performance. By emitting smaller batchbuffers, we eat some CPU overhead to
2129 * get better parallelism.
2132 drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
2134 drm_intel_bufmgr_gem *bufmgr_gem =
2135 (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
2136 unsigned int total = 0;
2137 unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
2140 /* Check for fence reg constraints if necessary */
2141 if (bufmgr_gem->available_fences) {
2142 total_fences = drm_intel_gem_total_fences(bo_array, count);
2143 if (total_fences > bufmgr_gem->available_fences)
2147 total = drm_intel_gem_estimate_batch_space(bo_array, count);
2149 if (total > threshold)
2150 total = drm_intel_gem_compute_batch_space(bo_array, count);
2152 if (total > threshold) {
2153 DBG("check_space: overflowed available aperture, "
2155 total / 1024, (int)bufmgr_gem->gtt_size / 1024);
2158 DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024,
2159 (int)bufmgr_gem->gtt_size / 1024);
2165 * Disable buffer reuse for objects which are shared with the kernel
2166 * as scanout buffers
2169 drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
2171 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2173 bo_gem->reusable = false;
2178 drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
2180 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2182 return bo_gem->reusable;
2186 _drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
2188 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2191 for (i = 0; i < bo_gem->reloc_count; i++) {
2192 if (bo_gem->reloc_target_info[i].bo == target_bo)
2194 if (bo == bo_gem->reloc_target_info[i].bo)
2196 if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
2204 /** Return true if target_bo is referenced by bo's relocation tree. */
2206 drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
2208 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
2210 if (bo == NULL || target_bo == NULL)
2212 if (target_bo_gem->used_as_reloc_target)
2213 return _drm_intel_gem_bo_references(bo, target_bo);
2218 add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
2220 unsigned int i = bufmgr_gem->num_buckets;
2222 assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));
2224 DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
2225 bufmgr_gem->cache_bucket[i].size = size;
2226 bufmgr_gem->num_buckets++;
2230 init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
2232 unsigned long size, cache_max_size = 64 * 1024 * 1024;
2234 /* OK, so power of two buckets was too wasteful of memory.
2235 * Give 3 other sizes between each power of two, to hopefully
2236 * cover things accurately enough. (The alternative is
2237 * probably to just go for exact matching of sizes, and assume
2238 * that for things like composited window resize the tiled
2239 * width/height alignment and rounding of sizes to pages will
2240 * get us useful cache hit rates anyway)
2242 add_bucket(bufmgr_gem, 4096);
2243 add_bucket(bufmgr_gem, 4096 * 2);
2244 add_bucket(bufmgr_gem, 4096 * 3);
2246 /* Initialize the linked lists for BO reuse cache. */
2247 for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
2248 add_bucket(bufmgr_gem, size);
2250 add_bucket(bufmgr_gem, size + size * 1 / 4);
2251 add_bucket(bufmgr_gem, size + size * 2 / 4);
2252 add_bucket(bufmgr_gem, size + size * 3 / 4);
2257 drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
2259 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2261 bufmgr_gem->vma_max = limit;
2263 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
2267 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
2268 * and manage map buffer objections.
2270 * \param fd File descriptor of the opened DRM device.
2273 drm_intel_bufmgr_gem_init(int fd, int batch_size)
2275 drm_intel_bufmgr_gem *bufmgr_gem;
2276 struct drm_i915_gem_get_aperture aperture;
2277 drm_i915_getparam_t gp;
2281 bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
2282 if (bufmgr_gem == NULL)
2285 bufmgr_gem->fd = fd;
2287 if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
2292 ret = drmIoctl(bufmgr_gem->fd,
2293 DRM_IOCTL_I915_GEM_GET_APERTURE,
2297 bufmgr_gem->gtt_size = aperture.aper_available_size;
2299 fprintf(stderr, "DRM_IOCTL_I915_GEM_APERTURE failed: %s\n",
2301 bufmgr_gem->gtt_size = 128 * 1024 * 1024;
2302 fprintf(stderr, "Assuming %dkB available aperture size.\n"
2303 "May lead to reduced performance or incorrect "
2305 (int)bufmgr_gem->gtt_size / 1024);
2308 gp.param = I915_PARAM_CHIPSET_ID;
2309 gp.value = &bufmgr_gem->pci_device;
2310 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2312 fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
2313 fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
2316 if (IS_GEN2(bufmgr_gem))
2317 bufmgr_gem->gen = 2;
2318 else if (IS_GEN3(bufmgr_gem))
2319 bufmgr_gem->gen = 3;
2320 else if (IS_GEN4(bufmgr_gem))
2321 bufmgr_gem->gen = 4;
2323 bufmgr_gem->gen = 6;
2325 if (IS_GEN3(bufmgr_gem) && bufmgr_gem->gtt_size > 256*1024*1024) {
2326 /* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
2327 * be used for tiled blits. To simplify the accounting, just
2328 * substract the unmappable part (fixed to 256MB on all known
2329 * gen3 devices) if the kernel advertises it. */
2330 bufmgr_gem->gtt_size -= 256*1024*1024;
2335 gp.param = I915_PARAM_HAS_EXECBUF2;
2336 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2340 gp.param = I915_PARAM_HAS_BSD;
2341 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2342 bufmgr_gem->has_bsd = ret == 0;
2344 gp.param = I915_PARAM_HAS_BLT;
2345 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2346 bufmgr_gem->has_blt = ret == 0;
2348 gp.param = I915_PARAM_HAS_RELAXED_FENCING;
2349 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2350 bufmgr_gem->has_relaxed_fencing = ret == 0;
2352 if (bufmgr_gem->gen < 4) {
2353 gp.param = I915_PARAM_NUM_FENCES_AVAIL;
2354 gp.value = &bufmgr_gem->available_fences;
2355 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2357 fprintf(stderr, "get fences failed: %d [%d]\n", ret,
2359 fprintf(stderr, "param: %d, val: %d\n", gp.param,
2361 bufmgr_gem->available_fences = 0;
2363 /* XXX The kernel reports the total number of fences,
2364 * including any that may be pinned.
2366 * We presume that there will be at least one pinned
2367 * fence for the scanout buffer, but there may be more
2368 * than one scanout and the user may be manually
2369 * pinning buffers. Let's move to execbuffer2 and
2370 * thereby forget the insanity of using fences...
2372 bufmgr_gem->available_fences -= 2;
2373 if (bufmgr_gem->available_fences < 0)
2374 bufmgr_gem->available_fences = 0;
2378 /* Let's go with one relocation per every 2 dwords (but round down a bit
2379 * since a power of two will mean an extra page allocation for the reloc
2382 * Every 4 was too few for the blender benchmark.
2384 bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
2386 bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
2387 bufmgr_gem->bufmgr.bo_alloc_for_render =
2388 drm_intel_gem_bo_alloc_for_render;
2389 bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
2390 bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
2391 bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
2392 bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
2393 bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
2394 bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
2395 bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
2396 bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
2397 bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
2398 bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
2399 bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
2400 bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
2401 bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
2402 bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
2403 bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
2404 /* Use the new one if available */
2406 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
2407 bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
2409 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
2410 bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
2411 bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
2412 bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
2413 bufmgr_gem->bufmgr.debug = 0;
2414 bufmgr_gem->bufmgr.check_aperture_space =
2415 drm_intel_gem_check_aperture_space;
2416 bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
2417 bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
2418 bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
2419 drm_intel_gem_get_pipe_from_crtc_id;
2420 bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
2422 DRMINITLISTHEAD(&bufmgr_gem->named);
2423 init_cache_buckets(bufmgr_gem);
2425 DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
2426 bufmgr_gem->vma_max = -1; /* unlimited by default */
2428 return &bufmgr_gem->bufmgr;