1 /**************************************************************************
3 * Copyright © 2007 Red Hat Inc.
4 * Copyright © 2007-2012 Intel Corporation
5 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions of the Software.
29 **************************************************************************/
31 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
32 * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
33 * Eric Anholt <eric@anholt.net>
34 * Dave Airlie <airlied@linux.ie>
38 #include <xf86atomic.h>
46 #include <sys/ioctl.h>
48 #include <sys/types.h>
53 #define ETIME ETIMEDOUT
55 #include "libdrm_macros.h"
56 #include "libdrm_lists.h"
57 #include "intel_bufmgr.h"
58 #include "intel_bufmgr_priv.h"
59 #include "intel_chipset.h"
73 #define memclear(s) memset(&s, 0, sizeof(s))
75 #define DBG(...) do { \
76 if (bufmgr_gem->bufmgr.debug) \
77 fprintf(stderr, __VA_ARGS__); \
80 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
81 #define MAX2(A, B) ((A) > (B) ? (A) : (B))
84 * upper_32_bits - return bits 32-63 of a number
85 * @n: the number we're accessing
87 * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress
88 * the "right shift count >= width of type" warning when that quantity is
91 #define upper_32_bits(n) ((__u32)(((n) >> 16) >> 16))
94 * lower_32_bits - return bits 0-31 of a number
95 * @n: the number we're accessing
97 #define lower_32_bits(n) ((__u32)(n))
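/* Example (mirrors the debug output used throughout this file): a 64-bit
 * GTT offset is printed as two 32-bit halves,
 *
 *	DBG("offset 0x%08x %08x\n",
 *	    upper_32_bits(bo->offset64), lower_32_bits(bo->offset64));
 */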
99 typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
101 struct drm_intel_gem_bo_bucket {
106 typedef struct _drm_intel_bufmgr_gem {
107 drm_intel_bufmgr bufmgr;
115 pthread_mutex_t lock;
117 struct drm_i915_gem_exec_object *exec_objects;
118 struct drm_i915_gem_exec_object2 *exec2_objects;
119 drm_intel_bo **exec_bos;
123 /** Array of lists of cached gem objects of power-of-two sizes */
124 struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
128 drmMMListHead managers;
130 drm_intel_bo_gem *name_table;
131 drm_intel_bo_gem *handle_table;
133 drmMMListHead vma_cache;
134 int vma_count, vma_open, vma_max;
137 int available_fences;
140 unsigned int has_bsd : 1;
141 unsigned int has_blt : 1;
142 unsigned int has_relaxed_fencing : 1;
143 unsigned int has_llc : 1;
144 unsigned int has_wait_timeout : 1;
145 unsigned int bo_reuse : 1;
146 unsigned int no_exec : 1;
147 unsigned int has_vebox : 1;
148 unsigned int has_exec_async : 1;
156 } drm_intel_bufmgr_gem;
158 #define DRM_INTEL_RELOC_FENCE (1<<0)
160 typedef struct _drm_intel_reloc_target_info {
163 } drm_intel_reloc_target;
165 struct _drm_intel_bo_gem {
173 * Kernel-assigned global name for this object
175 * List contains both flink named and prime fd'd objects
177 unsigned int global_name;
179 UT_hash_handle handle_hh;
180 UT_hash_handle name_hh;
183 * Index of the buffer within the validation list while preparing a
184 * batchbuffer execution.
189 * Current tiling mode
191 uint32_t tiling_mode;
192 uint32_t swizzle_mode;
193 unsigned long stride;
195 unsigned long kflags;
199 /** Array passed to the DRM containing relocation information. */
200 struct drm_i915_gem_relocation_entry *relocs;
202 * Array of info structs corresponding to relocs[i].target_handle etc
204 drm_intel_reloc_target *reloc_target_info;
205 /** Number of entries in relocs */
207 /** Array of BOs that are referenced by this buffer and will be softpinned */
208 drm_intel_bo **softpin_target;
209 /** Number of softpinned BOs that are referenced by this buffer */
210 int softpin_target_count;
211 /** Maximum number of softpinned BOs that are referenced by this buffer */
212 int softpin_target_size;
214 /** Mapped address for the buffer, saved across map/unmap cycles */
216 /** GTT virtual address for the buffer, saved across map/unmap cycles */
218 /** WC CPU address for the buffer, saved across map/unmap cycles */
221 * Virtual address of the buffer allocated by user, used for userptr
226 drmMMListHead vma_list;
232 * Boolean of whether this BO and its children have been included in
233 * the current drm_intel_bufmgr_check_aperture_space() total.
235 bool included_in_check_aperture;
238 * Boolean of whether this buffer has been used as a relocation
239 * target and had its size accounted for, and thus can't have any
240 * further relocations added to it.
242 bool used_as_reloc_target;
245 * Boolean of whether we have encountered an error whilst building the relocation tree.
250 * Boolean of whether this buffer can be re-used
255 * Boolean of whether the GPU is definitely not accessing the buffer.
257 * This is only valid when reusable, since non-reusable
258 * buffers are those that have been shared with other
259 * processes, so we don't know their state.
264 * Boolean of whether this buffer was allocated with userptr
269 * Size in bytes of this buffer and its relocation descendants.
271 * Used to avoid costly tree walking in
272 * drm_intel_bufmgr_check_aperture in the common case.
277 * Number of potential fence registers required by this buffer and its relocations.
280 int reloc_tree_fences;
282 /** Whether we may need to issue the SW_FINISH ioctl on unmap. */
283 bool mapped_cpu_write;
287 drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);
290 drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);
293 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
294 uint32_t * swizzle_mode);
297 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
298 uint32_t tiling_mode,
301 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
304 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);
306 static void drm_intel_gem_bo_free(drm_intel_bo *bo);
308 static inline drm_intel_bo_gem *to_bo_gem(drm_intel_bo *bo)
310 return (drm_intel_bo_gem *)bo;
314 drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
315 uint32_t *tiling_mode)
317 unsigned long min_size, max_size;
320 if (*tiling_mode == I915_TILING_NONE)
323 /* 965+ just needs multiples of the page size for tiling */
324 if (bufmgr_gem->gen >= 4)
325 return ROUND_UP_TO(size, 4096);
327 /* Older chips need powers of two, of at least 512k or 1M */
328 if (bufmgr_gem->gen == 3) {
329 min_size = 1024*1024;
330 max_size = 128*1024*1024;
333 max_size = 64*1024*1024;
336 if (size > max_size) {
337 *tiling_mode = I915_TILING_NONE;
341 /* Do we need to allocate every page for the fence? */
342 if (bufmgr_gem->has_relaxed_fencing)
343 return ROUND_UP_TO(size, 4096);
345 for (i = min_size; i < size; i <<= 1)
352 * Round a given pitch up to the minimum required for X tiling on a
353 * given chip. We use 512 as the minimum to allow for a later tiling change.
357 drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
358 unsigned long pitch, uint32_t *tiling_mode)
360 unsigned long tile_width;
363 /* If untiled, then just align it so that we can do rendering
364 * to it with the 3D engine.
366 if (*tiling_mode == I915_TILING_NONE)
367 return ALIGN(pitch, 64);
369 if (*tiling_mode == I915_TILING_X
370 || (IS_915(bufmgr_gem->pci_device)
371 && *tiling_mode == I915_TILING_Y))
376 /* 965 is flexible */
377 if (bufmgr_gem->gen >= 4)
378 return ROUND_UP_TO(pitch, tile_width);
380 /* The older hardware has a maximum pitch of 8192 with tiled
381 * surfaces, so fall back to untiled if it's too large.
384 *tiling_mode = I915_TILING_NONE;
385 return ALIGN(pitch, 64);
388 /* Pre-965 needs power of two tile width */
389 for (i = tile_width; i < pitch; i <<= 1)
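/* Worked example (illustrative): for an X-tiled surface, tile_width is
 * 512 bytes.  A requested pitch of 1100 becomes ROUND_UP_TO(1100, 512)
 * = 1536 on gen4+, while pre-965 walks powers of two from 512
 * (512 -> 1024 -> 2048) and returns 2048.
 */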
395 static struct drm_intel_gem_bo_bucket *
396 drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
401 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
402 struct drm_intel_gem_bo_bucket *bucket =
403 &bufmgr_gem->cache_bucket[i];
404 if (bucket->size >= size) {
413 drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
417 for (i = 0; i < bufmgr_gem->exec_count; i++) {
418 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
419 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
421 if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL) {
422 DBG("%2d: %d %s(%s)\n", i, bo_gem->gem_handle,
423 bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "",
428 for (j = 0; j < bo_gem->reloc_count; j++) {
429 drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
430 drm_intel_bo_gem *target_gem =
431 (drm_intel_bo_gem *) target_bo;
433 DBG("%2d: %d %s(%s)@0x%08x %08x -> "
434 "%d (%s)@0x%08x %08x + 0x%08x\n",
437 bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "",
439 upper_32_bits(bo_gem->relocs[j].offset),
440 lower_32_bits(bo_gem->relocs[j].offset),
441 target_gem->gem_handle,
443 upper_32_bits(target_bo->offset64),
444 lower_32_bits(target_bo->offset64),
445 bo_gem->relocs[j].delta);
448 for (j = 0; j < bo_gem->softpin_target_count; j++) {
449 drm_intel_bo *target_bo = bo_gem->softpin_target[j];
450 drm_intel_bo_gem *target_gem =
451 (drm_intel_bo_gem *) target_bo;
452 DBG("%2d: %d %s(%s) -> "
453 "%d *(%s)@0x%08x %08x\n",
456 bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "",
458 target_gem->gem_handle,
460 upper_32_bits(target_bo->offset64),
461 lower_32_bits(target_bo->offset64));
467 drm_intel_gem_bo_reference(drm_intel_bo *bo)
469 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
471 atomic_inc(&bo_gem->refcount);
475 * Adds the given buffer to the list of buffers to be validated (moved into the
476 * appropriate memory type) with the next batch submission.
478 * If a buffer is validated multiple times in a batch submission, it ends up
479 * with the intersection of the memory type flags and the union of the access flags.
483 drm_intel_add_validate_buffer(drm_intel_bo *bo)
485 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
486 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
489 if (bo_gem->validate_index != -1)
492 /* Extend the array of validation entries as necessary. */
493 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
494 int new_size = bufmgr_gem->exec_size * 2;
499 bufmgr_gem->exec_objects =
500 realloc(bufmgr_gem->exec_objects,
501 sizeof(*bufmgr_gem->exec_objects) * new_size);
502 bufmgr_gem->exec_bos =
503 realloc(bufmgr_gem->exec_bos,
504 sizeof(*bufmgr_gem->exec_bos) * new_size);
505 bufmgr_gem->exec_size = new_size;
508 index = bufmgr_gem->exec_count;
509 bo_gem->validate_index = index;
510 /* Fill in array entry */
511 bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
512 bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
513 bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
514 bufmgr_gem->exec_objects[index].alignment = bo->align;
515 bufmgr_gem->exec_objects[index].offset = 0;
516 bufmgr_gem->exec_bos[index] = bo;
517 bufmgr_gem->exec_count++;
521 drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
523 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
524 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
530 flags |= EXEC_OBJECT_NEEDS_FENCE;
532 if (bo_gem->validate_index != -1) {
533 bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |= flags;
537 /* Extend the array of validation entries as necessary. */
538 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
539 int new_size = bufmgr_gem->exec_size * 2;
544 bufmgr_gem->exec2_objects =
545 realloc(bufmgr_gem->exec2_objects,
546 sizeof(*bufmgr_gem->exec2_objects) * new_size);
547 bufmgr_gem->exec_bos =
548 realloc(bufmgr_gem->exec_bos,
549 sizeof(*bufmgr_gem->exec_bos) * new_size);
550 bufmgr_gem->exec_size = new_size;
553 index = bufmgr_gem->exec_count;
554 bo_gem->validate_index = index;
555 /* Fill in array entry */
556 bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
557 bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
558 bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
559 bufmgr_gem->exec2_objects[index].alignment = bo->align;
560 bufmgr_gem->exec2_objects[index].offset = bo->offset64;
561 bufmgr_gem->exec2_objects[index].flags = bo_gem->kflags | flags;
562 bufmgr_gem->exec2_objects[index].rsvd1 = 0;
563 bufmgr_gem->exec2_objects[index].rsvd2 = 0;
564 bufmgr_gem->exec_bos[index] = bo;
565 bufmgr_gem->exec_count++;
568 #define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
572 drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
573 drm_intel_bo_gem *bo_gem,
574 unsigned int alignment)
578 assert(!bo_gem->used_as_reloc_target);
580 /* The older chipsets are far less flexible in terms of tiling,
581 * and require tiled buffers to be size-aligned in the aperture.
582 * This means that in the worst possible case we will need a hole
583 * twice as large as the object in order for it to fit into the
584 * aperture. Optimal packing is for wimps.
586 size = bo_gem->bo.size;
587 if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
588 unsigned int min_size;
590 if (bufmgr_gem->has_relaxed_fencing) {
591 if (bufmgr_gem->gen == 3)
592 min_size = 1024*1024;
596 while (min_size < size)
601 /* Account for worst-case alignment. */
602 alignment = MAX2(alignment, min_size);
605 bo_gem->reloc_tree_size = size + alignment;
609 drm_intel_setup_reloc_list(drm_intel_bo *bo)
611 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
612 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
613 unsigned int max_relocs = bufmgr_gem->max_relocs;
615 if (bo->size / 4 < max_relocs)
616 max_relocs = bo->size / 4;
618 bo_gem->relocs = malloc(max_relocs *
619 sizeof(struct drm_i915_gem_relocation_entry));
620 bo_gem->reloc_target_info = malloc(max_relocs *
621 sizeof(drm_intel_reloc_target));
622 if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
623 bo_gem->has_error = true;
625 free (bo_gem->relocs);
626 bo_gem->relocs = NULL;
628 free (bo_gem->reloc_target_info);
629 bo_gem->reloc_target_info = NULL;
638 drm_intel_gem_bo_busy(drm_intel_bo *bo)
640 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
641 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
642 struct drm_i915_gem_busy busy;
645 if (bo_gem->reusable && bo_gem->idle)
649 busy.handle = bo_gem->gem_handle;
651 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
653 bo_gem->idle = !busy.busy;
661 drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
662 drm_intel_bo_gem *bo_gem, int state)
664 struct drm_i915_gem_madvise madv;
667 madv.handle = bo_gem->gem_handle;
670 drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
672 return madv.retained;
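/* Illustrative usage sketch of the madvise interface (the bucket cache
 * below applies the same pattern internally): mark an idle buffer
 * purgeable, and revalidate it before reuse.
 *
 *	drm_intel_bo_madvise(bo, I915_MADV_DONTNEED);
 *	...
 *	if (!drm_intel_bo_madvise(bo, I915_MADV_WILLNEED)) {
 *		// pages were reclaimed; contents are lost, reallocate
 *	}
 */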
676 drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
678 return drm_intel_gem_bo_madvise_internal
679 ((drm_intel_bufmgr_gem *) bo->bufmgr,
680 (drm_intel_bo_gem *) bo,
684 /* drop the oldest entries that have been purged by the kernel */
686 drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
687 struct drm_intel_gem_bo_bucket *bucket)
689 while (!DRMLISTEMPTY(&bucket->head)) {
690 drm_intel_bo_gem *bo_gem;
692 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
693 bucket->head.next, head);
694 if (drm_intel_gem_bo_madvise_internal
695 (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
698 DRMLISTDEL(&bo_gem->head);
699 drm_intel_gem_bo_free(&bo_gem->bo);
703 static drm_intel_bo *
704 drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
708 uint32_t tiling_mode,
709 unsigned long stride,
710 unsigned int alignment)
712 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
713 drm_intel_bo_gem *bo_gem;
714 unsigned int page_size = getpagesize();
716 struct drm_intel_gem_bo_bucket *bucket;
717 bool alloc_from_cache;
718 unsigned long bo_size;
719 bool for_render = false;
721 if (flags & BO_ALLOC_FOR_RENDER)
724 /* Round the allocated size up to a power of two number of pages. */
725 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
727 /* If we don't have caching at this size, don't actually round the allocation up. */
730 if (bucket == NULL) {
732 if (bo_size < page_size)
735 bo_size = bucket->size;
738 pthread_mutex_lock(&bufmgr_gem->lock);
739 /* Get a buffer out of the cache if available */
741 alloc_from_cache = false;
742 if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
744 /* Allocate new render-target BOs from the tail (MRU)
745 * of the list, as it will likely be hot in the GPU
746 * cache and in the aperture for us.
748 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
749 bucket->head.prev, head);
750 DRMLISTDEL(&bo_gem->head);
751 alloc_from_cache = true;
752 bo_gem->bo.align = alignment;
754 assert(alignment == 0);
755 /* For non-render-target BOs (where we're probably
756 * going to map it first thing in order to fill it
757 * with data), check if the last BO in the cache is
758 * unbusy, and only reuse in that case. Otherwise,
759 * allocating a new buffer is probably faster than
760 * waiting for the GPU to finish.
762 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
763 bucket->head.next, head);
764 if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
765 alloc_from_cache = true;
766 DRMLISTDEL(&bo_gem->head);
770 if (alloc_from_cache) {
771 if (!drm_intel_gem_bo_madvise_internal
772 (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
773 drm_intel_gem_bo_free(&bo_gem->bo);
774 drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
779 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
782 drm_intel_gem_bo_free(&bo_gem->bo);
788 if (!alloc_from_cache) {
789 struct drm_i915_gem_create create;
791 bo_gem = calloc(1, sizeof(*bo_gem));
795 /* drm_intel_gem_bo_free calls DRMLISTDEL() for an uninitialized
796 list (vma_list), so initialize the list head here */
797 DRMINITLISTHEAD(&bo_gem->vma_list);
799 bo_gem->bo.size = bo_size;
802 create.size = bo_size;
804 ret = drmIoctl(bufmgr_gem->fd,
805 DRM_IOCTL_I915_GEM_CREATE,
812 bo_gem->gem_handle = create.handle;
813 HASH_ADD(handle_hh, bufmgr_gem->handle_table,
814 gem_handle, sizeof(bo_gem->gem_handle),
817 bo_gem->bo.handle = bo_gem->gem_handle;
818 bo_gem->bo.bufmgr = bufmgr;
819 bo_gem->bo.align = alignment;
821 bo_gem->tiling_mode = I915_TILING_NONE;
822 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
825 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
832 atomic_set(&bo_gem->refcount, 1);
833 bo_gem->validate_index = -1;
834 bo_gem->reloc_tree_fences = 0;
835 bo_gem->used_as_reloc_target = false;
836 bo_gem->has_error = false;
837 bo_gem->reusable = true;
839 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, alignment);
840 pthread_mutex_unlock(&bufmgr_gem->lock);
842 DBG("bo_create: buf %d (%s) %ldb\n",
843 bo_gem->gem_handle, bo_gem->name, size);
848 drm_intel_gem_bo_free(&bo_gem->bo);
850 pthread_mutex_unlock(&bufmgr_gem->lock);
854 static drm_intel_bo *
855 drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
858 unsigned int alignment)
860 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
866 static drm_intel_bo *
867 drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
870 unsigned int alignment)
872 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
873 I915_TILING_NONE, 0, 0);
876 static drm_intel_bo *
877 drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
878 int x, int y, int cpp, uint32_t *tiling_mode,
879 unsigned long *pitch, unsigned long flags)
881 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
882 unsigned long size, stride;
886 unsigned long aligned_y, height_alignment;
888 tiling = *tiling_mode;
890 /* If we're tiled, our allocations are in 8 or 32-row blocks,
891 * so failure to align our height means that we won't allocate enough pages.
894 * If we're untiled, we still have to align to 2 rows high
895 * because the data port accesses 2x2 blocks even if the
896 * bottom row isn't to be rendered, so failure to align means
897 * we could walk off the end of the GTT and fault. This is
898 * documented on 965, and may be the case on older chipsets
899 * too so we try to be careful.
902 height_alignment = 2;
904 if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
905 height_alignment = 16;
906 else if (tiling == I915_TILING_X
907 || (IS_915(bufmgr_gem->pci_device)
908 && tiling == I915_TILING_Y))
909 height_alignment = 8;
910 else if (tiling == I915_TILING_Y)
911 height_alignment = 32;
912 aligned_y = ALIGN(y, height_alignment);
915 stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
916 size = stride * aligned_y;
917 size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
918 } while (*tiling_mode != tiling);
921 if (tiling == I915_TILING_NONE)
924 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
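/* Illustrative usage (hypothetical 1024x768, 32bpp target):
 *
 *	uint32_t tiling = I915_TILING_X;
 *	unsigned long pitch;
 *	drm_intel_bo *bo =
 *		drm_intel_bo_alloc_tiled(bufmgr, "tiled bo", 1024, 768, 4,
 *					 &tiling, &pitch, 0);
 *	// tiling may come back demoted to I915_TILING_NONE, and pitch
 *	// holds the stride rounded up by the code above.
 */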
928 static drm_intel_bo *
929 drm_intel_gem_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
932 uint32_t tiling_mode,
937 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
938 drm_intel_bo_gem *bo_gem;
940 struct drm_i915_gem_userptr userptr;
942 /* Tiling with userptr surfaces is not supported
943 * on all hardware, so refuse it for the time being.
945 if (tiling_mode != I915_TILING_NONE)
948 bo_gem = calloc(1, sizeof(*bo_gem));
952 atomic_set(&bo_gem->refcount, 1);
953 DRMINITLISTHEAD(&bo_gem->vma_list);
955 bo_gem->bo.size = size;
958 userptr.user_ptr = (__u64)((unsigned long)addr);
959 userptr.user_size = size;
960 userptr.flags = flags;
962 ret = drmIoctl(bufmgr_gem->fd,
963 DRM_IOCTL_I915_GEM_USERPTR,
966 DBG("bo_create_userptr: "
967 "ioctl failed with user ptr %p size 0x%lx, "
968 "user flags 0x%lx\n", addr, size, flags);
973 pthread_mutex_lock(&bufmgr_gem->lock);
975 bo_gem->gem_handle = userptr.handle;
976 bo_gem->bo.handle = bo_gem->gem_handle;
977 bo_gem->bo.bufmgr = bufmgr;
978 bo_gem->is_userptr = true;
979 bo_gem->bo.virtual = addr;
980 /* Save the address provided by user */
981 bo_gem->user_virtual = addr;
982 bo_gem->tiling_mode = I915_TILING_NONE;
983 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
986 HASH_ADD(handle_hh, bufmgr_gem->handle_table,
987 gem_handle, sizeof(bo_gem->gem_handle),
991 bo_gem->validate_index = -1;
992 bo_gem->reloc_tree_fences = 0;
993 bo_gem->used_as_reloc_target = false;
994 bo_gem->has_error = false;
995 bo_gem->reusable = false;
997 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
998 pthread_mutex_unlock(&bufmgr_gem->lock);
1000 DBG("bo_create_userptr: "
1001 "ptr %p buf %d (%s) size %ldb, stride 0x%x, tile mode %d\n",
1002 addr, bo_gem->gem_handle, bo_gem->name,
1003 size, stride, tiling_mode);
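/* Illustrative usage sketch (requires kernel userptr support; the
 * buffer here is hypothetical):
 *
 *	void *mem;
 *	if (posix_memalign(&mem, 4096, 4096) == 0) {
 *		drm_intel_bo *bo =
 *			drm_intel_bo_alloc_userptr(bufmgr, "wrapped", mem,
 *						   I915_TILING_NONE, 0,
 *						   4096, 0);
 *		// on success, bo->virtual aliases mem
 *	}
 */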
1009 has_userptr(drm_intel_bufmgr_gem *bufmgr_gem)
1014 struct drm_i915_gem_userptr userptr;
1016 pgsz = sysconf(_SC_PAGESIZE);
1019 ret = posix_memalign(&ptr, pgsz, pgsz);
1021 DBG("Failed to get a page (%ld) for userptr detection!\n",
1027 userptr.user_ptr = (__u64)(unsigned long)ptr;
1028 userptr.user_size = pgsz;
1031 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
1033 if (errno == ENODEV && userptr.flags == 0) {
1034 userptr.flags = I915_USERPTR_UNSYNCHRONIZED;
1041 /* We don't release the userptr bo here as we want to keep the
1042 * kernel mm tracking alive for our lifetime. The first time we
1043 * create a userptr object, the kernel has to install an mmu_notifier
1044 * which is a heavyweight operation (e.g. it requires taking all
1045 * mm_locks and stop_machine()).
1048 bufmgr_gem->userptr_active.ptr = ptr;
1049 bufmgr_gem->userptr_active.handle = userptr.handle;
1054 static drm_intel_bo *
1055 check_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
1058 uint32_t tiling_mode,
1061 unsigned long flags)
1063 if (has_userptr((drm_intel_bufmgr_gem *)bufmgr))
1064 bufmgr->bo_alloc_userptr = drm_intel_gem_bo_alloc_userptr;
1066 bufmgr->bo_alloc_userptr = NULL;
1068 return drm_intel_bo_alloc_userptr(bufmgr, name, addr,
1069 tiling_mode, stride, size, flags);
1072 static int get_tiling_mode(drm_intel_bufmgr_gem *bufmgr_gem,
1073 uint32_t gem_handle,
1074 uint32_t *tiling_mode,
1075 uint32_t *swizzle_mode)
1077 struct drm_i915_gem_get_tiling get_tiling = {
1078 .handle = gem_handle,
1082 ret = drmIoctl(bufmgr_gem->fd,
1083 DRM_IOCTL_I915_GEM_GET_TILING,
1085 if (ret != 0 && errno != EOPNOTSUPP)
1088 *tiling_mode = get_tiling.tiling_mode;
1089 *swizzle_mode = get_tiling.swizzle_mode;
1095 * Returns a drm_intel_bo wrapping the given buffer object handle.
1097 * This can be used when one application needs to pass a buffer object to another.
1100 drm_public drm_intel_bo *
1101 drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
1103 unsigned int handle)
1105 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1106 drm_intel_bo_gem *bo_gem;
1108 struct drm_gem_open open_arg;
1110 /* At the moment most applications only have a few named bos.
1111 * For instance, in a DRI client only the render buffers passed
1112 * between X and the client are named. And since X returns the
1113 * alternating names for the front/back buffer, a linear search
1114 * provides a sufficiently fast match.
1116 pthread_mutex_lock(&bufmgr_gem->lock);
1117 HASH_FIND(name_hh, bufmgr_gem->name_table,
1118 &handle, sizeof(handle), bo_gem);
1120 drm_intel_gem_bo_reference(&bo_gem->bo);
1125 open_arg.name = handle;
1126 ret = drmIoctl(bufmgr_gem->fd,
1130 DBG("Couldn't reference %s handle 0x%08x: %s\n",
1131 name, handle, strerror(errno));
1135 /* Now see if someone has used a prime handle to get this
1136 * object from the kernel before by looking through the list
1137 * again for a matching gem_handle
1139 HASH_FIND(handle_hh, bufmgr_gem->handle_table,
1140 &open_arg.handle, sizeof(open_arg.handle), bo_gem);
1142 drm_intel_gem_bo_reference(&bo_gem->bo);
1146 bo_gem = calloc(1, sizeof(*bo_gem));
1150 atomic_set(&bo_gem->refcount, 1);
1151 DRMINITLISTHEAD(&bo_gem->vma_list);
1153 bo_gem->bo.size = open_arg.size;
1154 bo_gem->bo.offset = 0;
1155 bo_gem->bo.offset64 = 0;
1156 bo_gem->bo.virtual = NULL;
1157 bo_gem->bo.bufmgr = bufmgr;
1158 bo_gem->name = name;
1159 bo_gem->validate_index = -1;
1160 bo_gem->gem_handle = open_arg.handle;
1161 bo_gem->bo.handle = open_arg.handle;
1162 bo_gem->global_name = handle;
1163 bo_gem->reusable = false;
1165 HASH_ADD(handle_hh, bufmgr_gem->handle_table,
1166 gem_handle, sizeof(bo_gem->gem_handle), bo_gem);
1167 HASH_ADD(name_hh, bufmgr_gem->name_table,
1168 global_name, sizeof(bo_gem->global_name), bo_gem);
1170 ret = get_tiling_mode(bufmgr_gem, bo_gem->gem_handle,
1171 &bo_gem->tiling_mode, &bo_gem->swizzle_mode);
1175 /* XXX stride is unknown */
1176 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
1177 DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
1180 pthread_mutex_unlock(&bufmgr_gem->lock);
1184 drm_intel_gem_bo_free(&bo_gem->bo);
1185 pthread_mutex_unlock(&bufmgr_gem->lock);
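/* Illustrative cross-process sharing sketch (send_name_to_peer is a
 * hypothetical transport, e.g. the X protocol):
 *
 *	// exporter
 *	uint32_t name;
 *	if (drm_intel_bo_flink(bo, &name) == 0)
 *		send_name_to_peer(name);
 *
 *	// importer
 *	drm_intel_bo *shared =
 *		drm_intel_bo_gem_create_from_name(bufmgr, "peer bo", name);
 */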
1190 drm_intel_gem_bo_free(drm_intel_bo *bo)
1192 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1193 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1194 struct drm_gem_close close;
1197 DRMLISTDEL(&bo_gem->vma_list);
1198 if (bo_gem->mem_virtual) {
1199 VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
1200 drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
1201 bufmgr_gem->vma_count--;
1203 if (bo_gem->wc_virtual) {
1204 VG(VALGRIND_FREELIKE_BLOCK(bo_gem->wc_virtual, 0));
1205 drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
1206 bufmgr_gem->vma_count--;
1208 if (bo_gem->gtt_virtual) {
1209 drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
1210 bufmgr_gem->vma_count--;
1213 if (bo_gem->global_name)
1214 HASH_DELETE(name_hh, bufmgr_gem->name_table, bo_gem);
1215 HASH_DELETE(handle_hh, bufmgr_gem->handle_table, bo_gem);
1217 /* Close this object */
1219 close.handle = bo_gem->gem_handle;
1220 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
1222 DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
1223 bo_gem->gem_handle, bo_gem->name, strerror(errno));
1229 drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
1232 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1234 if (bo_gem->mem_virtual)
1235 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);
1237 if (bo_gem->wc_virtual)
1238 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->wc_virtual, bo->size);
1240 if (bo_gem->gtt_virtual)
1241 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
1245 /** Frees all cached buffers significantly older than @time. */
1247 drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
1251 if (bufmgr_gem->time == time)
1254 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
1255 struct drm_intel_gem_bo_bucket *bucket =
1256 &bufmgr_gem->cache_bucket[i];
1258 while (!DRMLISTEMPTY(&bucket->head)) {
1259 drm_intel_bo_gem *bo_gem;
1261 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1262 bucket->head.next, head);
1263 if (time - bo_gem->free_time <= 1)
1266 DRMLISTDEL(&bo_gem->head);
1268 drm_intel_gem_bo_free(&bo_gem->bo);
1272 bufmgr_gem->time = time;
1275 static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
1279 DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
1280 bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);
1282 if (bufmgr_gem->vma_max < 0)
1285 /* We may need to evict a few entries in order to create new mmaps */
1286 limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
1290 while (bufmgr_gem->vma_count > limit) {
1291 drm_intel_bo_gem *bo_gem;
1293 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1294 bufmgr_gem->vma_cache.next,
1296 assert(bo_gem->map_count == 0);
1297 DRMLISTDELINIT(&bo_gem->vma_list);
1299 if (bo_gem->mem_virtual) {
1300 drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
1301 bo_gem->mem_virtual = NULL;
1302 bufmgr_gem->vma_count--;
1304 if (bo_gem->wc_virtual) {
1305 drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
1306 bo_gem->wc_virtual = NULL;
1307 bufmgr_gem->vma_count--;
1309 if (bo_gem->gtt_virtual) {
1310 drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
1311 bo_gem->gtt_virtual = NULL;
1312 bufmgr_gem->vma_count--;
1317 static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1318 drm_intel_bo_gem *bo_gem)
1320 bufmgr_gem->vma_open--;
1321 DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
1322 if (bo_gem->mem_virtual)
1323 bufmgr_gem->vma_count++;
1324 if (bo_gem->wc_virtual)
1325 bufmgr_gem->vma_count++;
1326 if (bo_gem->gtt_virtual)
1327 bufmgr_gem->vma_count++;
1328 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
1331 static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1332 drm_intel_bo_gem *bo_gem)
1334 bufmgr_gem->vma_open++;
1335 DRMLISTDEL(&bo_gem->vma_list);
1336 if (bo_gem->mem_virtual)
1337 bufmgr_gem->vma_count--;
1338 if (bo_gem->wc_virtual)
1339 bufmgr_gem->vma_count--;
1340 if (bo_gem->gtt_virtual)
1341 bufmgr_gem->vma_count--;
1342 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
1346 drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
1348 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1349 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1350 struct drm_intel_gem_bo_bucket *bucket;
1353 /* Unreference all the target buffers */
1354 for (i = 0; i < bo_gem->reloc_count; i++) {
1355 if (bo_gem->reloc_target_info[i].bo != bo) {
1356 drm_intel_gem_bo_unreference_locked_timed(bo_gem->
1357 reloc_target_info[i].bo,
1361 for (i = 0; i < bo_gem->softpin_target_count; i++)
1362 drm_intel_gem_bo_unreference_locked_timed(bo_gem->softpin_target[i],
1365 bo_gem->reloc_count = 0;
1366 bo_gem->used_as_reloc_target = false;
1367 bo_gem->softpin_target_count = 0;
1369 DBG("bo_unreference final: %d (%s)\n",
1370 bo_gem->gem_handle, bo_gem->name);
1372 /* release memory associated with this object */
1373 if (bo_gem->reloc_target_info) {
1374 free(bo_gem->reloc_target_info);
1375 bo_gem->reloc_target_info = NULL;
1377 if (bo_gem->relocs) {
1378 free(bo_gem->relocs);
1379 bo_gem->relocs = NULL;
1381 if (bo_gem->softpin_target) {
1382 free(bo_gem->softpin_target);
1383 bo_gem->softpin_target = NULL;
1384 bo_gem->softpin_target_size = 0;
1387 /* Clear any left-over mappings */
1388 if (bo_gem->map_count) {
1389 DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
1390 bo_gem->map_count = 0;
1391 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1392 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1395 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
1396 /* Put the buffer into our internal cache for reuse if we can. */
1397 if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
1398 drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
1399 I915_MADV_DONTNEED)) {
1400 bo_gem->free_time = time;
1402 bo_gem->name = NULL;
1403 bo_gem->validate_index = -1;
1405 DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
1407 drm_intel_gem_bo_free(bo);
1411 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
1414 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1416 assert(atomic_read(&bo_gem->refcount) > 0);
1417 if (atomic_dec_and_test(&bo_gem->refcount))
1418 drm_intel_gem_bo_unreference_final(bo, time);
1421 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
1423 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1425 assert(atomic_read(&bo_gem->refcount) > 0);
1427 if (atomic_add_unless(&bo_gem->refcount, -1, 1)) {
1428 drm_intel_bufmgr_gem *bufmgr_gem =
1429 (drm_intel_bufmgr_gem *) bo->bufmgr;
1430 struct timespec time;
1432 clock_gettime(CLOCK_MONOTONIC, &time);
1434 pthread_mutex_lock(&bufmgr_gem->lock);
1436 if (atomic_dec_and_test(&bo_gem->refcount)) {
1437 drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
1438 drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
1441 pthread_mutex_unlock(&bufmgr_gem->lock);
1445 static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
1447 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1448 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1449 struct drm_i915_gem_set_domain set_domain;
1452 if (bo_gem->is_userptr) {
1453 /* Return the same user ptr */
1454 bo->virtual = bo_gem->user_virtual;
1458 pthread_mutex_lock(&bufmgr_gem->lock);
1460 if (bo_gem->map_count++ == 0)
1461 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
1463 if (!bo_gem->mem_virtual) {
1464 struct drm_i915_gem_mmap mmap_arg;
1466 DBG("bo_map: %d (%s), map_count=%d\n",
1467 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1470 mmap_arg.handle = bo_gem->gem_handle;
1471 mmap_arg.size = bo->size;
1472 ret = drmIoctl(bufmgr_gem->fd,
1473 DRM_IOCTL_I915_GEM_MMAP,
1477 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1478 __FILE__, __LINE__, bo_gem->gem_handle,
1479 bo_gem->name, strerror(errno));
1480 if (--bo_gem->map_count == 0)
1481 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1482 pthread_mutex_unlock(&bufmgr_gem->lock);
1485 VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
1486 bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
1488 DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1489 bo_gem->mem_virtual);
1490 bo->virtual = bo_gem->mem_virtual;
1492 memclear(set_domain);
1493 set_domain.handle = bo_gem->gem_handle;
1494 set_domain.read_domains = I915_GEM_DOMAIN_CPU;
1496 set_domain.write_domain = I915_GEM_DOMAIN_CPU;
1498 set_domain.write_domain = 0;
1499 ret = drmIoctl(bufmgr_gem->fd,
1500 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1503 DBG("%s:%d: Error setting to CPU domain %d: %s\n",
1504 __FILE__, __LINE__, bo_gem->gem_handle,
1509 bo_gem->mapped_cpu_write = true;
1511 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1512 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
1513 pthread_mutex_unlock(&bufmgr_gem->lock);
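/* Illustrative CPU upload path using the mapping above (sketch; data and
 * len are hypothetical):
 *
 *	if (drm_intel_bo_map(bo, 1) == 0) {	// 1 = write enable
 *		memcpy(bo->virtual, data, len);
 *		drm_intel_bo_unmap(bo);
 *	}
 */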
1519 map_gtt(drm_intel_bo *bo)
1521 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1522 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1525 if (bo_gem->is_userptr)
1528 if (bo_gem->map_count++ == 0)
1529 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
1531 /* Get a mapping of the buffer if we haven't before. */
1532 if (bo_gem->gtt_virtual == NULL) {
1533 struct drm_i915_gem_mmap_gtt mmap_arg;
1535 DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
1536 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1539 mmap_arg.handle = bo_gem->gem_handle;
1541 /* Get the fake offset back... */
1542 ret = drmIoctl(bufmgr_gem->fd,
1543 DRM_IOCTL_I915_GEM_MMAP_GTT,
1547 DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
1549 bo_gem->gem_handle, bo_gem->name,
1551 if (--bo_gem->map_count == 0)
1552 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1557 bo_gem->gtt_virtual = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
1558 MAP_SHARED, bufmgr_gem->fd,
1560 if (bo_gem->gtt_virtual == MAP_FAILED) {
1561 bo_gem->gtt_virtual = NULL;
1563 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1565 bo_gem->gem_handle, bo_gem->name,
1567 if (--bo_gem->map_count == 0)
1568 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1573 bo->virtual = bo_gem->gtt_virtual;
1575 DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1576 bo_gem->gtt_virtual);
1582 drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
1584 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1585 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1586 struct drm_i915_gem_set_domain set_domain;
1589 pthread_mutex_lock(&bufmgr_gem->lock);
1593 pthread_mutex_unlock(&bufmgr_gem->lock);
1597 /* Now move it to the GTT domain so that the GPU and CPU
1598 * caches are flushed and the GPU isn't actively using the buffer.
1601 * The pagefault handler does this domain change for us when
1602 * it has unbound the BO from the GTT, but it's up to us to
1603 * tell it when we're about to use things if we had done
1604 * rendering and it still happens to be bound to the GTT.
1606 memclear(set_domain);
1607 set_domain.handle = bo_gem->gem_handle;
1608 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1609 set_domain.write_domain = I915_GEM_DOMAIN_GTT;
1610 ret = drmIoctl(bufmgr_gem->fd,
1611 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1614 DBG("%s:%d: Error setting domain %d: %s\n",
1615 __FILE__, __LINE__, bo_gem->gem_handle,
1619 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1620 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
1621 pthread_mutex_unlock(&bufmgr_gem->lock);
1627 * Performs a mapping of the buffer object like the normal GTT
1628 * mapping, but avoids waiting for the GPU to be done reading from or
1629 * rendering to the buffer.
1631 * This is used in the implementation of GL_ARB_map_buffer_range: The
1632 * user asks to create a buffer, then does a mapping, fills some
1633 * space, runs a drawing command, then asks to map it again without
1634 * synchronizing because it guarantees that it won't write over the
1635 * data that the GPU is busy using (or, more specifically, that if it
1636 * does write over the data, it acknowledges that rendering is undefined).
1641 drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
1643 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1645 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1649 /* If the CPU cache isn't coherent with the GTT, then use a
1650 * regular synchronized mapping. The problem is that we don't
1651 * track where the buffer was last used on the CPU side in
1652 * terms of drm_intel_bo_map vs drm_intel_gem_bo_map_gtt, so
1653 * we would potentially corrupt the buffer even when the user
1654 * does reasonable things.
1656 if (!bufmgr_gem->has_llc)
1657 return drm_intel_gem_bo_map_gtt(bo);
1659 pthread_mutex_lock(&bufmgr_gem->lock);
1663 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1664 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
1667 pthread_mutex_unlock(&bufmgr_gem->lock);
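/* Illustrative GL_ARB_map_buffer_range-style use (sketch; off_b and len_b
 * are hypothetical): after a batch is submitted reading range A, the
 * caller may refill a disjoint range B without stalling, provided it
 * guarantees the ranges don't overlap.
 *
 *	if (drm_intel_gem_bo_map_unsynchronized(bo) == 0) {
 *		memcpy((char *)bo->virtual + off_b, src, len_b);
 *		drm_intel_bo_unmap(bo);
 *	}
 */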
1672 static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
1674 drm_intel_bufmgr_gem *bufmgr_gem;
1675 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1681 if (bo_gem->is_userptr)
1684 bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1686 pthread_mutex_lock(&bufmgr_gem->lock);
1688 if (bo_gem->map_count <= 0) {
1689 DBG("attempted to unmap an unmapped bo\n");
1690 pthread_mutex_unlock(&bufmgr_gem->lock);
1691 /* Preserve the old behaviour of just treating this as a
1692 * no-op rather than reporting the error.
1697 if (bo_gem->mapped_cpu_write) {
1698 struct drm_i915_gem_sw_finish sw_finish;
1700 /* Cause a flush to happen if the buffer's pinned for
1701 * scanout, so the results show up in a timely manner.
1702 * Unlike GTT set domains, this only does work if the
1703 * buffer should be scanout-related.
1705 memclear(sw_finish);
1706 sw_finish.handle = bo_gem->gem_handle;
1707 ret = drmIoctl(bufmgr_gem->fd,
1708 DRM_IOCTL_I915_GEM_SW_FINISH,
1710 ret = ret == -1 ? -errno : 0;
1712 bo_gem->mapped_cpu_write = false;
1715 /* We need to unmap after every invocation as we cannot track
1716 * an open vma for every bo as that will exhaust the system
1717 * limits and cause later failures.
1719 if (--bo_gem->map_count == 0) {
1720 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1721 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1724 pthread_mutex_unlock(&bufmgr_gem->lock);
1730 drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
1732 return drm_intel_gem_bo_unmap(bo);
1736 drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
1737 unsigned long size, const void *data)
1739 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1740 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1741 struct drm_i915_gem_pwrite pwrite;
1744 if (bo_gem->is_userptr)
1748 pwrite.handle = bo_gem->gem_handle;
1749 pwrite.offset = offset;
1751 pwrite.data_ptr = (uint64_t) (uintptr_t) data;
1752 ret = drmIoctl(bufmgr_gem->fd,
1753 DRM_IOCTL_I915_GEM_PWRITE,
1757 DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
1758 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1759 (int)size, strerror(errno));
1766 drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
1768 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1769 struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
1772 memclear(get_pipe_from_crtc_id);
1773 get_pipe_from_crtc_id.crtc_id = crtc_id;
1774 ret = drmIoctl(bufmgr_gem->fd,
1775 DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
1776 &get_pipe_from_crtc_id);
1778 /* We return -1 here to signal that we don't
1779 * know which pipe is associated with this crtc.
1780 * This lets the caller know that this information
1781 * isn't available; using the wrong pipe for
1782 * vblank waiting can cause the chipset to lock up
1787 return get_pipe_from_crtc_id.pipe;
1791 drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
1792 unsigned long size, void *data)
1794 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1795 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1796 struct drm_i915_gem_pread pread;
1799 if (bo_gem->is_userptr)
1803 pread.handle = bo_gem->gem_handle;
1804 pread.offset = offset;
1806 pread.data_ptr = (uint64_t) (uintptr_t) data;
1807 ret = drmIoctl(bufmgr_gem->fd,
1808 DRM_IOCTL_I915_GEM_PREAD,
1812 DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
1813 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1814 (int)size, strerror(errno));
1820 /** Waits for all GPU rendering with the object to have completed. */
1822 drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
1824 drm_intel_gem_bo_start_gtt_access(bo, 1);
1828 * Waits on a BO for the given amount of time.
1830 * @bo: buffer object to wait for
1831 * @timeout_ns: amount of time to wait in nanoseconds.
1832 * If value is less than 0, an infinite wait will occur.
1834 * Returns 0 if the wait was successful, i.e. the last batch referencing the
1835 * object has completed within the allotted time. Otherwise some negative return
1836 * value describes the error. Of particular interest is -ETIME when the wait has
1837 * failed to yield the desired result.
1839 * Similar to drm_intel_gem_bo_wait_rendering except a timeout parameter allows
1840 * the operation to give up after a certain amount of time. Another subtle
1841 * difference is that the internal locking semantics are different (this variant does
1842 * not hold the lock for the duration of the wait). This makes the wait subject
1843 * to a larger userspace race window.
1845 * The implementation shall wait until the object is no longer actively
1846 * referenced within a batch buffer at the time of the call. The wait does
1847 * not guard against the buffer being re-issued via another thread or a flinked
1848 * handle. Userspace must make sure this race does not occur if such precision is important.
1851 * Note that some kernels have broken the promise of an infinite wait for
1852 * negative values; upgrade to the latest stable kernel if this is the case.
1855 drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
1857 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1858 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1859 struct drm_i915_gem_wait wait;
1862 if (!bufmgr_gem->has_wait_timeout) {
1863 DBG("%s:%d: Timed wait is not supported. Falling back to "
1864 "infinite wait\n", __FILE__, __LINE__);
1866 drm_intel_gem_bo_wait_rendering(bo);
1869 return drm_intel_gem_bo_busy(bo) ? -ETIME : 0;
1874 wait.bo_handle = bo_gem->gem_handle;
1875 wait.timeout_ns = timeout_ns;
1876 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
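/* Illustrative usage (sketch): bounded wait with a 500ms budget.
 *
 *	if (drm_intel_gem_bo_wait(bo, 500 * 1000 * 1000) == -ETIME) {
 *		// object is still busy; try again later or wait forever
 *	}
 */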
1884 * Sets the object to the GTT read and possibly write domain, used by the X
1885 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
1887 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
1888 * can do tiled pixmaps this way.
1891 drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
1893 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1894 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1895 struct drm_i915_gem_set_domain set_domain;
1898 memclear(set_domain);
1899 set_domain.handle = bo_gem->gem_handle;
1900 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1901 set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
1902 ret = drmIoctl(bufmgr_gem->fd,
1903 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1906 DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
1907 __FILE__, __LINE__, bo_gem->gem_handle,
1908 set_domain.read_domains, set_domain.write_domain,
1914 drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
1916 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1917 struct drm_gem_close close_bo;
1920 free(bufmgr_gem->exec2_objects);
1921 free(bufmgr_gem->exec_objects);
1922 free(bufmgr_gem->exec_bos);
1924 pthread_mutex_destroy(&bufmgr_gem->lock);
1926 /* Free any cached buffer objects we were going to reuse */
1927 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
1928 struct drm_intel_gem_bo_bucket *bucket =
1929 &bufmgr_gem->cache_bucket[i];
1930 drm_intel_bo_gem *bo_gem;
1932 while (!DRMLISTEMPTY(&bucket->head)) {
1933 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1934 bucket->head.next, head);
1935 DRMLISTDEL(&bo_gem->head);
1937 drm_intel_gem_bo_free(&bo_gem->bo);
1941 /* Release userptr bo kept hanging around for optimisation. */
1942 if (bufmgr_gem->userptr_active.ptr) {
1944 close_bo.handle = bufmgr_gem->userptr_active.handle;
1945 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
1946 free(bufmgr_gem->userptr_active.ptr);
1949 "Failed to release test userptr object! (%d) "
1950 "i915 kernel driver may not be sane!\n", errno);
1957 * Adds the target buffer to the validation list and adds the relocation
1958 * to the reloc_buffer's relocation list.
1960 * The relocation entry at the given offset must already contain the
1961 * precomputed relocation value, because the kernel will optimize out
1962 * the relocation entry write when the buffer hasn't moved from the
1963 * last known offset in target_bo.
1966 do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1967 drm_intel_bo *target_bo, uint32_t target_offset,
1968 uint32_t read_domains, uint32_t write_domain,
1971 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1972 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1973 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
1974 bool fenced_command;
1976 if (bo_gem->has_error)
1979 if (target_bo_gem->has_error) {
1980 bo_gem->has_error = true;
1984 /* We never use HW fences for rendering on 965+ */
1985 if (bufmgr_gem->gen >= 4)
1988 fenced_command = need_fence;
1989 if (target_bo_gem->tiling_mode == I915_TILING_NONE)
1992 /* Create a new relocation list if needed */
1993 if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
1996 /* Check overflow */
1997 assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
2000 assert(offset <= bo->size - 4);
2001 assert((write_domain & (write_domain - 1)) == 0);
2003 /* An object needing a fence is a tiled buffer, so it won't have
2004 * relocs to other buffers.
2007 assert(target_bo_gem->reloc_count == 0);
2008 target_bo_gem->reloc_tree_fences = 1;
2011 /* Make sure that we're not adding a reloc to something whose size has
2012 * already been accounted for.
2014 assert(!bo_gem->used_as_reloc_target);
2015 if (target_bo_gem != bo_gem) {
2016 target_bo_gem->used_as_reloc_target = true;
2017 bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
2018 bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
2021 bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
2022 if (target_bo != bo)
2023 drm_intel_gem_bo_reference(target_bo);
2025 bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
2026 DRM_INTEL_RELOC_FENCE;
2028 bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
2030 bo_gem->relocs[bo_gem->reloc_count].offset = offset;
2031 bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
2032 bo_gem->relocs[bo_gem->reloc_count].target_handle =
2033 target_bo_gem->gem_handle;
2034 bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
2035 bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
2036 bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64;
2037 bo_gem->reloc_count++;
2043 drm_intel_gem_bo_use_48b_address_range(drm_intel_bo *bo, uint32_t enable)
2045 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2048 bo_gem->kflags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
2050 bo_gem->kflags &= ~EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
2054 drm_intel_gem_bo_add_softpin_target(drm_intel_bo *bo, drm_intel_bo *target_bo)
2056 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2057 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2058 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
2059 if (bo_gem->has_error)
2062 if (target_bo_gem->has_error) {
2063 bo_gem->has_error = true;
2067 if (!(target_bo_gem->kflags & EXEC_OBJECT_PINNED))
2069 if (target_bo_gem == bo_gem)
2072 if (bo_gem->softpin_target_count == bo_gem->softpin_target_size) {
2073 int new_size = bo_gem->softpin_target_size * 2;
2075 new_size = bufmgr_gem->max_relocs;
2077 bo_gem->softpin_target = realloc(bo_gem->softpin_target, new_size *
2078 sizeof(drm_intel_bo *));
2079 if (!bo_gem->softpin_target)
2082 bo_gem->softpin_target_size = new_size;
2084 bo_gem->softpin_target[bo_gem->softpin_target_count] = target_bo;
2085 drm_intel_gem_bo_reference(target_bo);
2086 bo_gem->softpin_target_count++;
2092 drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
2093 drm_intel_bo *target_bo, uint32_t target_offset,
2094 uint32_t read_domains, uint32_t write_domain)
2096 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
2097 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *)target_bo;
2099 if (target_bo_gem->kflags & EXEC_OBJECT_PINNED)
2100 return drm_intel_gem_bo_add_softpin_target(bo, target_bo);
2102 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
2103 read_domains, write_domain,
2104 !bufmgr_gem->fenced_relocs);
2108 drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
2109 drm_intel_bo *target_bo,
2110 uint32_t target_offset,
2111 uint32_t read_domains, uint32_t write_domain)
2113 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
2114 read_domains, write_domain, true);
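/* Illustrative relocation emission (sketch; batch writing simplified and
 * n/delta hypothetical): store the presumed address in the batch first,
 * then record the reloc so the kernel can skip the rewrite when target_bo
 * has not moved.
 *
 *	uint32_t *batch = bo->virtual;
 *	batch[n] = target_bo->offset64 + delta;
 *	drm_intel_bo_emit_reloc(bo, n * 4, target_bo, delta,
 *				I915_GEM_DOMAIN_RENDER, 0);
 */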
2118 drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
2120 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2122 return bo_gem->reloc_count;
2126 * Removes existing relocation entries in the BO after "start".
2128 * This allows a user to avoid the two-step process of counting up all
2129 * the buffer objects and doing a
2130 * drm_intel_bufmgr_check_aperture_space() before emitting any of the
2131 * relocations for the state setup. Instead, save the state of the
2132 * batchbuffer including drm_intel_gem_bo_get_reloc_count(), emit all the
2133 * state, and then check if it still fits in the aperture.
2135 * Any further drm_intel_bufmgr_check_aperture_space() queries
2136 * involving this buffer in the tree are undefined after this call.
2138 * This also removes all softpinned targets being referenced by the BO.
2141 drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
2143 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2144 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2146 struct timespec time;
2148 clock_gettime(CLOCK_MONOTONIC, &time);
2150 assert(bo_gem->reloc_count >= start);
2152 /* Unreference the cleared target buffers */
2153 pthread_mutex_lock(&bufmgr_gem->lock);
2155 for (i = start; i < bo_gem->reloc_count; i++) {
2156 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->reloc_target_info[i].bo;
2157 if (&target_bo_gem->bo != bo) {
2158 bo_gem->reloc_tree_fences -= target_bo_gem->reloc_tree_fences;
2159 drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo,
2163 bo_gem->reloc_count = start;
2165 for (i = 0; i < bo_gem->softpin_target_count; i++) {
2166 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->softpin_target[i];
2167 drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo, time.tv_sec);
2169 bo_gem->softpin_target_count = 0;
2171 pthread_mutex_unlock(&bufmgr_gem->lock);
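/* Illustrative pattern from the comment above (emit_state and
 * flush_and_retry are hypothetical helpers):
 *
 *	int start = drm_intel_gem_bo_get_reloc_count(batch_bo);
 *	emit_state(batch_bo);
 *	if (drm_intel_bufmgr_check_aperture_space(&batch_bo, 1) != 0) {
 *		drm_intel_gem_bo_clear_relocs(batch_bo, start);
 *		flush_and_retry();
 *	}
 */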
2176 * Walk the tree of relocations rooted at BO and accumulate the list of
2177 * validations to be performed and update the relocation buffers with
2178 * index values into the validation list.
2181 drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
2183 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2186 if (bo_gem->relocs == NULL)
2189 for (i = 0; i < bo_gem->reloc_count; i++) {
2190 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
2192 if (target_bo == bo)
2195 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
2197 /* Continue walking the tree depth-first. */
2198 drm_intel_gem_bo_process_reloc(target_bo);
2200 /* Add the target to the validate list */
2201 drm_intel_add_validate_buffer(target_bo);
2206 drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
2208 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
2211 if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL)
2214 for (i = 0; i < bo_gem->reloc_count; i++) {
2215 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
2218 if (target_bo == bo)
2221 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
2223 /* Continue walking the tree depth-first. */
2224 drm_intel_gem_bo_process_reloc2(target_bo);
2226 need_fence = (bo_gem->reloc_target_info[i].flags &
2227 DRM_INTEL_RELOC_FENCE);
2229 /* Add the target to the validate list */
2230 drm_intel_add_validate_buffer2(target_bo, need_fence);
2233 for (i = 0; i < bo_gem->softpin_target_count; i++) {
2234 drm_intel_bo *target_bo = bo_gem->softpin_target[i];
2236 if (target_bo == bo)
2239 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
2240 drm_intel_gem_bo_process_reloc2(target_bo);
2241 drm_intel_add_validate_buffer2(target_bo, false);
2247 drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
2251 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2252 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2253 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2255 /* Update the buffer offset */
2256 if (bufmgr_gem->exec_objects[i].offset != bo->offset64) {
2257 DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
2258 bo_gem->gem_handle, bo_gem->name,
2259 upper_32_bits(bo->offset64),
2260 lower_32_bits(bo->offset64),
2261 upper_32_bits(bufmgr_gem->exec_objects[i].offset),
2262 lower_32_bits(bufmgr_gem->exec_objects[i].offset));
2263 bo->offset64 = bufmgr_gem->exec_objects[i].offset;
2264 bo->offset = bufmgr_gem->exec_objects[i].offset;
2270 drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
2274 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2275 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2276 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
2278 /* Update the buffer offset */
2279 if (bufmgr_gem->exec2_objects[i].offset != bo->offset64) {
2280 /* If we see a softpinned object here, it means that the kernel
2281 * has relocated our object, indicating a programming error.
2283 assert(!(bo_gem->kflags & EXEC_OBJECT_PINNED));
2284 DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
2285 bo_gem->gem_handle, bo_gem->name,
2286 upper_32_bits(bo->offset64),
2287 lower_32_bits(bo->offset64),
2288 upper_32_bits(bufmgr_gem->exec2_objects[i].offset),
2289 lower_32_bits(bufmgr_gem->exec2_objects[i].offset));
2290 bo->offset64 = bufmgr_gem->exec2_objects[i].offset;
2291 bo->offset = bufmgr_gem->exec2_objects[i].offset;
2297 drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
2298 int x1, int y1, int width, int height,
2299 enum aub_dump_bmp_format format,
2300 int pitch, int offset)
2305 drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
2306 drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
2308 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2309 struct drm_i915_gem_execbuffer execbuf;
2312 if (to_bo_gem(bo)->has_error)
2315 pthread_mutex_lock(&bufmgr_gem->lock);
2316 /* Update indices and set up the validate list. */
2317 drm_intel_gem_bo_process_reloc(bo);
2319 /* Add the batch buffer to the validation list. There are no
2320 * relocations pointing to it.
2322 drm_intel_add_validate_buffer(bo);
2325 execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
2326 execbuf.buffer_count = bufmgr_gem->exec_count;
2327 execbuf.batch_start_offset = 0;
2328 execbuf.batch_len = used;
2329 execbuf.cliprects_ptr = (uintptr_t) cliprects;
2330 execbuf.num_cliprects = num_cliprects;
2334 ret = drmIoctl(bufmgr_gem->fd,
2335 DRM_IOCTL_I915_GEM_EXECBUFFER,
2339 if (errno == ENOSPC) {
2340 DBG("Execbuffer fails to pin. "
2341 "Estimate: %u. Actual: %u. Available: %u\n",
2342 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
2345 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
2348 (unsigned int)bufmgr_gem->gtt_size);
2351 drm_intel_update_buffer_offsets(bufmgr_gem);
2353 if (bufmgr_gem->bufmgr.debug)
2354 drm_intel_gem_dump_validation_list(bufmgr_gem);
2356 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2357 drm_intel_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);
2359 bo_gem->idle = false;
2361 /* Disconnect the buffer from the validate list */
2362 bo_gem->validate_index = -1;
2363 bufmgr_gem->exec_bos[i] = NULL;
2365 bufmgr_gem->exec_count = 0;
2366 pthread_mutex_unlock(&bufmgr_gem->lock);
2372 do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
2373 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
2374 int in_fence, int *out_fence,
2377 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
2378 struct drm_i915_gem_execbuffer2 execbuf;
2382 if (to_bo_gem(bo)->has_error)
2385 switch (flags & 0x7) {
2389 if (!bufmgr_gem->has_blt)
2393 if (!bufmgr_gem->has_bsd)
2396 case I915_EXEC_VEBOX:
2397 if (!bufmgr_gem->has_vebox)
2400 case I915_EXEC_RENDER:
2401 case I915_EXEC_DEFAULT:
2405 pthread_mutex_lock(&bufmgr_gem->lock);
2406 /* Update indices and set up the validate list. */
2407 drm_intel_gem_bo_process_reloc2(bo);
2409 /* Add the batch buffer to the validation list. There are no relocations
2412 drm_intel_add_validate_buffer2(bo, 0);
2415 execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
2416 execbuf.buffer_count = bufmgr_gem->exec_count;
2417 execbuf.batch_start_offset = 0;
2418 execbuf.batch_len = used;
2419 execbuf.cliprects_ptr = (uintptr_t)cliprects;
2420 execbuf.num_cliprects = num_cliprects;
2423 execbuf.flags = flags;
2425 i915_execbuffer2_set_context_id(execbuf, 0);
2427 i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id);
2429 if (in_fence != -1) {
2430 execbuf.rsvd2 = in_fence;
2431 execbuf.flags |= I915_EXEC_FENCE_IN;
2433 if (out_fence != NULL) {
2435 execbuf.flags |= I915_EXEC_FENCE_OUT;
2438 if (bufmgr_gem->no_exec)
2439 goto skip_execution;
2441 ret = drmIoctl(bufmgr_gem->fd,
2442 DRM_IOCTL_I915_GEM_EXECBUFFER2_WR,
2446 if (ret == -ENOSPC) {
2447 DBG("Execbuffer fails to pin. "
2448 "Estimate: %u. Actual: %u. Available: %u\n",
2449 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
2450 bufmgr_gem->exec_count),
2451 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
2452 bufmgr_gem->exec_count),
2453 (unsigned int) bufmgr_gem->gtt_size);
2456 drm_intel_update_buffer_offsets2(bufmgr_gem);
2458 if (ret == 0 && out_fence != NULL)
2459 *out_fence = execbuf.rsvd2 >> 32;
2462 if (bufmgr_gem->bufmgr.debug)
2463 drm_intel_gem_dump_validation_list(bufmgr_gem);
2465 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2466 drm_intel_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);
2468 bo_gem->idle = false;
2470 /* Disconnect the buffer from the validate list */
2471 bo_gem->validate_index = -1;
2472 bufmgr_gem->exec_bos[i] = NULL;
2474 bufmgr_gem->exec_count = 0;
2475 pthread_mutex_unlock(&bufmgr_gem->lock);
2481 drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
2482 drm_clip_rect_t *cliprects, int num_cliprects,
2485 return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
2486 -1, NULL, I915_EXEC_RENDER);
2490 drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
2491 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
2494 return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
2499 drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
2500 int used, unsigned int flags)
2502 return do_exec2(bo, used, ctx, NULL, 0, 0, -1, NULL, flags);
2506 drm_intel_gem_bo_fence_exec(drm_intel_bo *bo,
2507 drm_intel_context *ctx,
2513 return do_exec2(bo, used, ctx, NULL, 0, 0, in_fence, out_fence, flags);
2517 drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
2519 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2520 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2521 struct drm_i915_gem_pin pin;
2525 pin.handle = bo_gem->gem_handle;
2526 pin.alignment = alignment;
2528 ret = drmIoctl(bufmgr_gem->fd,
2529 DRM_IOCTL_I915_GEM_PIN,
2534 bo->offset64 = pin.offset;
2535 bo->offset = pin.offset;
2540 drm_intel_gem_bo_unpin(drm_intel_bo *bo)
2542 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2543 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2544 struct drm_i915_gem_unpin unpin;
2548 unpin.handle = bo_gem->gem_handle;
2550 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
2558 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
2559 uint32_t tiling_mode,
2562 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2563 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2564 struct drm_i915_gem_set_tiling set_tiling;
2567 if (bo_gem->global_name == 0 &&
2568 tiling_mode == bo_gem->tiling_mode &&
2569 stride == bo_gem->stride)
2572 memset(&set_tiling, 0, sizeof(set_tiling));
2574 /* set_tiling is slightly broken and overwrites the
2575 * input on the error path, so we have to open code
2578 set_tiling.handle = bo_gem->gem_handle;
2579 set_tiling.tiling_mode = tiling_mode;
2580 set_tiling.stride = stride;
2582 ret = ioctl(bufmgr_gem->fd,
2583 DRM_IOCTL_I915_GEM_SET_TILING,
2585 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
2589 bo_gem->tiling_mode = set_tiling.tiling_mode;
2590 bo_gem->swizzle_mode = set_tiling.swizzle_mode;
2591 bo_gem->stride = set_tiling.stride;
2596 drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
2599 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2600 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2603 /* Tiling with userptr surfaces is not supported
2604 * on all hardware, so refuse it for the time being.
2606 if (bo_gem->is_userptr)
2609 /* Linear buffers have no stride. By ensuring that we only ever use
2610 * stride 0 with linear buffers, we simplify our code.
2612 if (*tiling_mode == I915_TILING_NONE)
2615 ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
2617 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
2619 *tiling_mode = bo_gem->tiling_mode;
2624 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
2625 uint32_t * swizzle_mode)
2627 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2629 *tiling_mode = bo_gem->tiling_mode;
2630 *swizzle_mode = bo_gem->swizzle_mode;
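/*
 * Usage sketch (illustrative): requesting X tiling and reading back what
 * the kernel actually granted, since set_tiling may legitimately settle
 * on a different mode. pitch is a hypothetical caller-computed stride;
 * callers should treat tiling == I915_TILING_NONE afterwards as a fall
 * back to a linear layout.
 *
 *	uint32_t tiling = I915_TILING_X, swizzle;
 *	drm_intel_bo_set_tiling(bo, &tiling, pitch);
 *	drm_intel_bo_get_tiling(bo, &tiling, &swizzle);
 */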
2635 drm_intel_gem_bo_set_softpin_offset(drm_intel_bo *bo, uint64_t offset)
2637 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2639 bo->offset64 = offset;
2640 bo->offset = offset;
2641 bo_gem->kflags |= EXEC_OBJECT_PINNED;
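/*
 * Usage sketch (illustrative): softpinning fixes a bo at a chosen GPU
 * virtual address so execbuf never relocates it. Only useful when the
 * kernel reports I915_PARAM_HAS_EXEC_SOFTPIN (probed in
 * drm_intel_bufmgr_gem_init() below); the address here is arbitrary.
 *
 *	if (drm_intel_bo_set_softpin_offset(bo, 4096 * 1024) == 0)
 *		assert(bo->offset64 == 4096 * 1024);
 */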
2646 drm_public drm_intel_bo *
2647 drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
2649 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
2652 drm_intel_bo_gem *bo_gem;
2654 pthread_mutex_lock(&bufmgr_gem->lock);
2655 ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
2657 DBG("create_from_prime: failed to obtain handle from fd: %s\n", strerror(errno));
2658 pthread_mutex_unlock(&bufmgr_gem->lock);
2663 * See if the kernel has already returned this buffer to us. Just as
2664 * for named buffers, we must not create two bo's pointing at the same
2667 HASH_FIND(handle_hh, bufmgr_gem->handle_table,
2668 &handle, sizeof(handle), bo_gem);
2670 drm_intel_gem_bo_reference(&bo_gem->bo);
2674 bo_gem = calloc(1, sizeof(*bo_gem));
2678 atomic_set(&bo_gem->refcount, 1);
2679 DRMINITLISTHEAD(&bo_gem->vma_list);
2681 /* Determine size of bo. The fd-to-handle ioctl really should
2682 * return the size, but it doesn't. If we have kernel 3.12 or
2683 * later, we can lseek on the prime fd to get the size. Older
2684 * kernels will just fail, in which case we fall back to the
2685 * provided (estimated or guessed) size. */
2686 ret = lseek(prime_fd, 0, SEEK_END);
2688 bo_gem->bo.size = ret;
2690 bo_gem->bo.size = size;
2692 bo_gem->bo.handle = handle;
2693 bo_gem->bo.bufmgr = bufmgr;
2695 bo_gem->gem_handle = handle;
2696 HASH_ADD(handle_hh, bufmgr_gem->handle_table,
2697 gem_handle, sizeof(bo_gem->gem_handle), bo_gem);
2699 bo_gem->name = "prime";
2700 bo_gem->validate_index = -1;
2701 bo_gem->reloc_tree_fences = 0;
2702 bo_gem->used_as_reloc_target = false;
2703 bo_gem->has_error = false;
2704 bo_gem->reusable = false;
2706 ret = get_tiling_mode(bufmgr_gem, handle,
2707 &bo_gem->tiling_mode, &bo_gem->swizzle_mode);
2711 /* XXX stride is unknown */
2712 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
2715 pthread_mutex_unlock(&bufmgr_gem->lock);
2719 drm_intel_gem_bo_free(&bo_gem->bo);
2720 pthread_mutex_unlock(&bufmgr_gem->lock);
2725 drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
2727 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2728 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2730 if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle,
2731 DRM_CLOEXEC | DRM_RDWR, prime_fd) != 0)
2734 bo_gem->reusable = false;
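/*
 * Usage sketch (illustrative): sharing a bo across processes or drivers
 * via PRIME. The exporter converts the bo into a dma-buf fd; the
 * importer wraps that fd in a new bo. The size argument is only a
 * fallback for kernels too old to support lseek() on dma-buf fds.
 * bufmgr2 stands for a hypothetical second buffer manager (e.g. in
 * another process).
 *
 *	int fd;
 *	if (drm_intel_bo_gem_export_to_prime(bo, &fd) == 0) {
 *		drm_intel_bo *imported =
 *			drm_intel_bo_gem_create_from_prime(bufmgr2, fd,
 *							   bo->size);
 *		close(fd);
 *	}
 */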
2740 drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
2742 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2743 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2745 if (!bo_gem->global_name) {
2746 struct drm_gem_flink flink;
2749 flink.handle = bo_gem->gem_handle;
2750 if (drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink))
2753 pthread_mutex_lock(&bufmgr_gem->lock);
2754 if (!bo_gem->global_name) {
2755 bo_gem->global_name = flink.name;
2756 bo_gem->reusable = false;
2758 HASH_ADD(name_hh, bufmgr_gem->name_table,
2759 global_name, sizeof(bo_gem->global_name),
2762 pthread_mutex_unlock(&bufmgr_gem->lock);
2765 *name = bo_gem->global_name;
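/*
 * Usage sketch (illustrative): publishing a bo under a global (flink)
 * name that a cooperating process can open with
 * drm_intel_bo_gem_create_from_name(). Flink names are world-guessable,
 * so PRIME fds are preferred where available. send_name_to_peer() is a
 * hypothetical IPC step.
 *
 *	uint32_t name;
 *	if (drm_intel_bo_flink(bo, &name) == 0)
 *		send_name_to_peer(name);
 */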
2770 * Enables unlimited caching of buffer objects for reuse.
2772 * This is potentially very memory expensive, as the cache at each bucket
2773 * size is only bounded by how many buffers of that size we've managed to have
2774 * in flight at once.
2777 drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
2779 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
2781 bufmgr_gem->bo_reuse = true;
2785 * Disables implicit synchronisation before executing the bo
2787 * This will cause rendering corruption unless you correctly manage explicit
2788 * fences for all rendering involving this buffer - including use by others.
2789 * Disabling the implicit serialisation is only required if that serialisation
2790 * is too coarse (for example, you have split the buffer into many
2791 * non-overlapping regions and are sharing the whole buffer between concurrent
2792 * independent command streams).
2794 * Note the kernel must advertise support via I915_PARAM_HAS_EXEC_ASYNC,
2795 * which can be checked using drm_intel_bufmgr_gem_can_disable_implicit_sync,
2796 * or subsequent execbufs involving the bo will generate EINVAL.
2799 drm_intel_gem_bo_disable_implicit_sync(drm_intel_bo *bo)
2801 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2803 bo_gem->kflags |= EXEC_OBJECT_ASYNC;
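/*
 * Usage sketch (illustrative): per the note above, gate the opt-out on
 * the bufmgr-level capability check.
 *
 *	if (drm_intel_bufmgr_gem_can_disable_implicit_sync(bufmgr))
 *		drm_intel_gem_bo_disable_implicit_sync(bo);
 */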
2807 * Enables implicit synchronisation before executing the bo
2809 * This is the default behaviour of the kernel, to wait upon prior writes
2810 * completing on the object before rendering with it, or to wait for prior
2811 * reads to complete before writing into the object.
2812 * drm_intel_gem_bo_disable_implicit_sync() can stop this behaviour, telling
2813 * the kernel never to insert a stall before using the object. Then this
2814 * function can be used to restore the implicit sync before subsequent
2818 drm_intel_gem_bo_enable_implicit_sync(drm_intel_bo *bo)
2820 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2822 bo_gem->kflags &= ~EXEC_OBJECT_ASYNC;
2826 * Query whether the kernel supports disabling of its implicit synchronisation
2827 * before execbuf. See drm_intel_gem_bo_disable_implicit_sync()
2830 drm_intel_bufmgr_gem_can_disable_implicit_sync(drm_intel_bufmgr *bufmgr)
2832 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
2834 return bufmgr_gem->has_exec_async;
2838 * Enable use of fenced reloc type.
2840 * New code should enable this to avoid unnecessary fence register
2841 * allocation. If this option is not enabled, all relocs will have a fence
2842 * register allocated.
2845 drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
2847 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2849 if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
2850 bufmgr_gem->fenced_relocs = true;
2854 * Return the additional aperture space required by the tree of buffer objects
2858 drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
2860 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2864 if (bo == NULL || bo_gem->included_in_check_aperture)
2868 bo_gem->included_in_check_aperture = true;
2870 for (i = 0; i < bo_gem->reloc_count; i++)
2872 drm_intel_gem_bo_get_aperture_space(bo_gem->
2873 reloc_target_info[i].bo);
2879 * Count the number of buffers in this list that need a fence reg
2881 * If the count is greater than the number of available regs, we'll have
2882 * to ask the caller to resubmit a batch with fewer tiled buffers.
2884 * This function over-counts if the same buffer is used multiple times.
2887 drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
2890 unsigned int total = 0;
2892 for (i = 0; i < count; i++) {
2893 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
2898 total += bo_gem->reloc_tree_fences;
2904 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
2905 * for the next drm_intel_bufmgr_check_aperture_space() call.
2908 drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
2910 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2913 if (bo == NULL || !bo_gem->included_in_check_aperture)
2916 bo_gem->included_in_check_aperture = false;
2918 for (i = 0; i < bo_gem->reloc_count; i++)
2919 drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
2920 reloc_target_info[i].bo);
2924 * Return a conservative estimate for the amount of aperture required
2925 * for a collection of buffers. This may double-count some buffers.
2928 drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
2931 unsigned int total = 0;
2933 for (i = 0; i < count; i++) {
2934 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
2936 total += bo_gem->reloc_tree_size;
2942 * Return the amount of aperture needed for a collection of buffers.
2943 * This avoids double counting any buffers, at the cost of looking
2944 * at every buffer in the set.
2947 drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
2950 unsigned int total = 0;
2952 for (i = 0; i < count; i++) {
2953 total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
2954 /* For the first buffer object in the array, we get an
2955 * accurate count back for its reloc_tree size (since nothing
2956 * had been flagged as being counted yet). We can save that
2957 * value out as a more conservative reloc_tree_size that
2958 * avoids double-counting target buffers. Since the first
2959 * buffer happens to usually be the batch buffer in our
2960 * callers, this can pull us back from doing the tree
2961 * walk on every new batch emit.
2964 drm_intel_bo_gem *bo_gem =
2965 (drm_intel_bo_gem *) bo_array[i];
2966 bo_gem->reloc_tree_size = total;
2970 for (i = 0; i < count; i++)
2971 drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
2976 * Return -1 if the batchbuffer should be flushed before attempting to
2977 * emit rendering referencing the buffers pointed to by bo_array.
2979 * This is required because if we try to emit a batchbuffer with relocations
2980 * to a tree of buffers that won't simultaneously fit in the aperture,
2981 * the rendering will return an error at a point where the software is not
2982 * prepared to recover from it.
2984 * However, we also want to emit the batchbuffer significantly before we reach
2985 * the limit, as a series of batchbuffers each of which references buffers
2986 * covering almost all of the aperture means that at each emit we end up
2987 * waiting to evict a buffer from the last rendering, and we get synchronous
2988 * performance. By emitting smaller batchbuffers, we eat some CPU overhead to
2989 * get better parallelism.
2992 drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
2994 drm_intel_bufmgr_gem *bufmgr_gem =
2995 (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
2996 unsigned int total = 0;
2997 unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
3000 /* Check for fence reg constraints if necessary */
3001 if (bufmgr_gem->available_fences) {
3002 total_fences = drm_intel_gem_total_fences(bo_array, count);
3003 if (total_fences > bufmgr_gem->available_fences)
3007 total = drm_intel_gem_estimate_batch_space(bo_array, count);
3009 if (total > threshold)
3010 total = drm_intel_gem_compute_batch_space(bo_array, count);
3012 if (total > threshold) {
3013 DBG("check_space: overflowed available aperture, "
3015 total / 1024, (int)bufmgr_gem->gtt_size / 1024);
3018 DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024,
3019 (int)bufmgr_gem->gtt_size / 1024);
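/*
 * Usage sketch (illustrative): callers typically run this check each
 * time they add buffers to a batch and flush early on overflow.
 * batch_bos, batch_count and flush_batch() are hypothetical caller
 * state.
 *
 *	if (drm_intel_bufmgr_check_aperture_space(batch_bos,
 *						  batch_count) != 0)
 *		flush_batch();
 */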
3025 * Disable buffer reuse for objects which are shared with the kernel
3026 * as scanout buffers
3029 drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
3031 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3033 bo_gem->reusable = false;
3038 drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
3040 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3042 return bo_gem->reusable;
3046 _drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
3048 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3051 for (i = 0; i < bo_gem->reloc_count; i++) {
3052 if (bo_gem->reloc_target_info[i].bo == target_bo)
3054 if (bo == bo_gem->reloc_target_info[i].bo)
3056 if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
3061 for (i = 0; i < bo_gem->softpin_target_count; i++) {
3062 if (bo_gem->softpin_target[i] == target_bo)
3064 if (_drm_intel_gem_bo_references(bo_gem->softpin_target[i], target_bo))
3071 /** Return true if target_bo is referenced by bo's relocation tree. */
3073 drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
3075 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
3077 if (bo == NULL || target_bo == NULL)
3079 if (target_bo_gem->used_as_reloc_target)
3080 return _drm_intel_gem_bo_references(bo, target_bo);
3085 add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
3087 unsigned int i = bufmgr_gem->num_buckets;
3089 assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));
3091 DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
3092 bufmgr_gem->cache_bucket[i].size = size;
3093 bufmgr_gem->num_buckets++;
3097 init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
3099 unsigned long size, cache_max_size = 64 * 1024 * 1024;
3101 /* OK, so power of two buckets was too wasteful of memory.
3102 * Give 3 other sizes between each power of two, to hopefully
3103 * cover things accurately enough. (The alternative is
3104 * probably to just go for exact matching of sizes, and assume
3105 * that for things like composited window resize the tiled
3106 * width/height alignment and rounding of sizes to pages will
3107 * get us useful cache hit rates anyway)
3109 add_bucket(bufmgr_gem, 4096);
3110 add_bucket(bufmgr_gem, 4096 * 2);
3111 add_bucket(bufmgr_gem, 4096 * 3);
3113 /* Initialize the linked lists for the BO reuse cache. */
3114 for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
3115 add_bucket(bufmgr_gem, size);
3117 add_bucket(bufmgr_gem, size + size * 1 / 4);
3118 add_bucket(bufmgr_gem, size + size * 2 / 4);
3119 add_bucket(bufmgr_gem, size + size * 3 / 4);
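/*
 * The resulting bucket sizes, in pages, are therefore 1, 2, 3, 4, 5, 6,
 * 7, 8, 10, 12, 14, 16, 20, 24, 28, 32, ...: each power of two (up to
 * the 64MB cache_max_size) plus three evenly spaced sizes on the way to
 * the next one.
 */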
3124 drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
3126 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3128 bufmgr_gem->vma_max = limit;
3130 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
3134 parse_devid_override(const char *devid_override)
3136 static const struct {
3140 { "brw", PCI_CHIP_I965_GM },
3141 { "g4x", PCI_CHIP_GM45_GM },
3142 { "ilk", PCI_CHIP_ILD_G },
3143 { "snb", PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS },
3144 { "ivb", PCI_CHIP_IVYBRIDGE_S_GT2 },
3145 { "hsw", PCI_CHIP_HASWELL_CRW_E_GT3 },
3146 { "byt", PCI_CHIP_VALLEYVIEW_3 },
3147 { "bdw", 0x1620 | BDW_ULX },
3148 { "skl", PCI_CHIP_SKYLAKE_DT_GT2 },
3149 { "kbl", PCI_CHIP_KABYLAKE_DT_GT2 },
3153 for (i = 0; i < ARRAY_SIZE(name_map); i++) {
3154 if (!strcmp(name_map[i].name, devid_override))
3155 return name_map[i].pci_id;
3158 return strtol(devid_override, NULL, 0);
3162 * Get the PCI ID for the device. This can be overridden by setting the
3163 * INTEL_DEVID_OVERRIDE environment variable to the desired ID.
3166 get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
3168 char *devid_override;
3171 drm_i915_getparam_t gp;
3173 if (geteuid() == getuid()) {
3174 devid_override = getenv("INTEL_DEVID_OVERRIDE");
3175 if (devid_override) {
3176 bufmgr_gem->no_exec = true;
3177 return parse_devid_override(devid_override);
3182 gp.param = I915_PARAM_CHIPSET_ID;
3184 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3186 fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
3187 fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
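/*
 * Usage sketch (illustrative): overriding the detected chipset from the
 * environment, e.g. to exercise Skylake code paths on other hardware.
 * Note the override also sets no_exec, so batches are silently not
 * submitted.
 *
 *	$ INTEL_DEVID_OVERRIDE=skl ./some-test
 *
 * A raw numeric PCI ID is accepted as well (see parse_devid_override()
 * above).
 */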
3193 drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
3195 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3197 return bufmgr_gem->pci_device;
3201 * Sets the AUB filename.
3203 * This function has to be called before drm_intel_bufmgr_gem_set_aub_dump()
3204 * for it to have any effect.
3207 drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
3208 const char *filename)
3213 * Sets up AUB dumping.
3215 * This is a trace file format that can be used with the simulator.
3216 * Packets are emitted in a format somewhat like GPU command packets.
3217 * You can set up a GTT and upload your objects into the referenced
3218 * space, then send off batchbuffers and get BMPs out the other end.
3221 drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
3223 fprintf(stderr, "libdrm aub dumping is deprecated.\n\n"
3224 "Use intel_aubdump from intel-gpu-tools instead. Install intel-gpu-tools,\n"
3225 "then run (for example)\n\n"
3226 "\t$ intel_aubdump --output=trace.aub glxgears -geometry 500x500\n\n"
3227 "See the intel_aubdump man page for more details.\n");
3230 drm_public drm_intel_context *
3231 drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
3233 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3234 struct drm_i915_gem_context_create create;
3235 drm_intel_context *context = NULL;
3238 context = calloc(1, sizeof(*context));
3243 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
3245 DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
3251 context->ctx_id = create.ctx_id;
3252 context->bufmgr = bufmgr;
3258 drm_intel_gem_context_get_id(drm_intel_context *ctx, uint32_t *ctx_id)
3263 *ctx_id = ctx->ctx_id;
3269 drm_intel_gem_context_destroy(drm_intel_context *ctx)
3271 drm_intel_bufmgr_gem *bufmgr_gem;
3272 struct drm_i915_gem_context_destroy destroy;
3280 bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
3281 destroy.ctx_id = ctx->ctx_id;
3282 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY,
3285 fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
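/*
 * Usage sketch (illustrative): giving a client its own hardware context
 * so its GPU state is isolated from other users of the fd. batch_bo and
 * used_bytes are hypothetical caller state.
 *
 *	drm_intel_context *ctx = drm_intel_gem_context_create(bufmgr);
 *	if (ctx != NULL) {
 *		drm_intel_gem_bo_context_exec(batch_bo, ctx, used_bytes,
 *					      I915_EXEC_RENDER);
 *		drm_intel_gem_context_destroy(ctx);
 *	}
 */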
3292 drm_intel_get_reset_stats(drm_intel_context *ctx,
3293 uint32_t *reset_count,
3297 drm_intel_bufmgr_gem *bufmgr_gem;
3298 struct drm_i915_reset_stats stats;
3306 bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
3307 stats.ctx_id = ctx->ctx_id;
3308 ret = drmIoctl(bufmgr_gem->fd,
3309 DRM_IOCTL_I915_GET_RESET_STATS,
3312 if (reset_count != NULL)
3313 *reset_count = stats.reset_count;
3316 *active = stats.batch_active;
3318 if (pending != NULL)
3319 *pending = stats.batch_pending;
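/*
 * Usage sketch (illustrative): robustness-aware clients can poll these
 * counters and compare reset_count across calls to detect a GPU reset
 * affecting this context. last_reset_count and handle_reset() are
 * hypothetical caller state.
 *
 *	uint32_t resets, active, pending;
 *	if (drm_intel_get_reset_stats(ctx, &resets, &active, &pending) == 0 &&
 *	    resets != last_reset_count)
 *		handle_reset();
 */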
3326 drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
3330 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3331 struct drm_i915_reg_read reg_read;
3335 reg_read.offset = offset;
3337 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_REG_READ, ®_read);
3339 *result = reg_read.val;
3344 drm_intel_get_subslice_total(int fd, unsigned int *subslice_total)
3346 drm_i915_getparam_t gp;
3350 gp.value = (int*)subslice_total;
3351 gp.param = I915_PARAM_SUBSLICE_TOTAL;
3352 ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
3360 drm_intel_get_eu_total(int fd, unsigned int *eu_total)
3362 drm_i915_getparam_t gp;
3366 gp.value = (int*)eu_total;
3367 gp.param = I915_PARAM_EU_TOTAL;
3368 ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
3376 drm_intel_get_pooled_eu(int fd)
3378 drm_i915_getparam_t gp;
3382 gp.param = I915_PARAM_HAS_POOLED_EU;
3384 if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
3391 drm_intel_get_min_eu_in_pool(int fd)
3393 drm_i915_getparam_t gp;
3397 gp.param = I915_PARAM_MIN_EU_IN_POOL;
3399 if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
3406 * Annotate the given bo for use in aub dumping.
3408 * \param annotations is an array of drm_intel_aub_annotation objects
3409 * describing the type of data in various sections of the bo. Each
3410 * element of the array specifies the type and subtype of a section of
3411 * the bo, and the past-the-end offset of that section. The elements
3412 * of \c annotations must be sorted so that ending_offset is
3415 * \param count is the number of elements in the \c annotations array.
3416 * If \c count is zero, then \c annotations will not be dereferenced.
3418 * Annotations are copied into a private data structure, so the caller may
3419 * re-use the memory pointed to by \c annotations after the call
3422 * Annotations are stored for the lifetime of the bo; to reset to the
3423 * default state (no annotations), call this function with a \c count
3426 drm_public void drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
3427 drm_intel_aub_annotation *annotations,
3432 static pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER;
3433 static drmMMListHead bufmgr_list = { &bufmgr_list, &bufmgr_list };
3435 static drm_intel_bufmgr_gem *
3436 drm_intel_bufmgr_gem_find(int fd)
3438 drm_intel_bufmgr_gem *bufmgr_gem;
3440 DRMLISTFOREACHENTRY(bufmgr_gem, &bufmgr_list, managers) {
3441 if (bufmgr_gem->fd == fd) {
3442 atomic_inc(&bufmgr_gem->refcount);
3451 drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
3453 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3455 if (atomic_add_unless(&bufmgr_gem->refcount, -1, 1)) {
3456 pthread_mutex_lock(&bufmgr_list_mutex);
3458 if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
3459 DRMLISTDEL(&bufmgr_gem->managers);
3460 drm_intel_bufmgr_gem_destroy(bufmgr);
3463 pthread_mutex_unlock(&bufmgr_list_mutex);
3467 drm_public void *drm_intel_gem_bo_map__gtt(drm_intel_bo *bo)
3469 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
3470 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3472 if (bo_gem->gtt_virtual)
3473 return bo_gem->gtt_virtual;
3475 if (bo_gem->is_userptr)
3478 pthread_mutex_lock(&bufmgr_gem->lock);
3479 if (bo_gem->gtt_virtual == NULL) {
3480 struct drm_i915_gem_mmap_gtt mmap_arg;
3483 DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
3484 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
3486 if (bo_gem->map_count++ == 0)
3487 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
3490 mmap_arg.handle = bo_gem->gem_handle;
3492 /* Get the fake offset back... */
3494 if (drmIoctl(bufmgr_gem->fd,
3495 DRM_IOCTL_I915_GEM_MMAP_GTT,
3498 ptr = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
3499 MAP_SHARED, bufmgr_gem->fd,
3502 if (ptr == MAP_FAILED) {
3503 if (--bo_gem->map_count == 0)
3504 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
3508 bo_gem->gtt_virtual = ptr;
3510 pthread_mutex_unlock(&bufmgr_gem->lock);
3512 return bo_gem->gtt_virtual;
3515 drm_public void *drm_intel_gem_bo_map__cpu(drm_intel_bo *bo)
3517 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
3518 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3520 if (bo_gem->mem_virtual)
3521 return bo_gem->mem_virtual;
3523 if (bo_gem->is_userptr) {
3524 /* Return the same user ptr */
3525 return bo_gem->user_virtual;
3528 pthread_mutex_lock(&bufmgr_gem->lock);
3529 if (!bo_gem->mem_virtual) {
3530 struct drm_i915_gem_mmap mmap_arg;
3532 if (bo_gem->map_count++ == 0)
3533 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
3535 DBG("bo_map: %d (%s), map_count=%d\n",
3536 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
3539 mmap_arg.handle = bo_gem->gem_handle;
3540 mmap_arg.size = bo->size;
3541 if (drmIoctl(bufmgr_gem->fd,
3542 DRM_IOCTL_I915_GEM_MMAP,
3544 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
3545 __FILE__, __LINE__, bo_gem->gem_handle,
3546 bo_gem->name, strerror(errno));
3547 if (--bo_gem->map_count == 0)
3548 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
3550 VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
3551 bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
3554 pthread_mutex_unlock(&bufmgr_gem->lock);
3556 return bo_gem->mem_virtual;
3559 drm_public void *drm_intel_gem_bo_map__wc(drm_intel_bo *bo)
3561 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
3562 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3564 if (bo_gem->wc_virtual)
3565 return bo_gem->wc_virtual;
3567 if (bo_gem->is_userptr)
3570 pthread_mutex_lock(&bufmgr_gem->lock);
3571 if (!bo_gem->wc_virtual) {
3572 struct drm_i915_gem_mmap mmap_arg;
3574 if (bo_gem->map_count++ == 0)
3575 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
3577 DBG("bo_map: %d (%s), map_count=%d\n",
3578 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
3581 mmap_arg.handle = bo_gem->gem_handle;
3582 mmap_arg.size = bo->size;
3583 mmap_arg.flags = I915_MMAP_WC;
3584 if (drmIoctl(bufmgr_gem->fd,
3585 DRM_IOCTL_I915_GEM_MMAP,
3587 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
3588 __FILE__, __LINE__, bo_gem->gem_handle,
3589 bo_gem->name, strerror(errno));
3590 if (--bo_gem->map_count == 0)
3591 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
3593 VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
3594 bo_gem->wc_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
3597 pthread_mutex_unlock(&bufmgr_gem->lock);
3599 return bo_gem->wc_virtual;
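/*
 * Usage sketch (illustrative): the three map__ helpers above return
 * long-lived mappings without any implicit synchronisation, so ordering
 * against the GPU is the caller's job (e.g. via
 * drm_intel_bo_wait_rendering() before reading back). device_has_llc,
 * data and size are hypothetical caller state: on LLC parts a CPU
 * mapping is cheap and coherent, while on non-LLC parts a
 * write-combining mapping avoids clflushes for streaming writes.
 *
 *	void *ptr = device_has_llc ? drm_intel_gem_bo_map__cpu(bo)
 *				   : drm_intel_gem_bo_map__wc(bo);
 *	if (ptr != NULL)
 *		memcpy(ptr, data, size);
 */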
3603 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
3604 * and manage buffer objects.
3606 * \param fd File descriptor of the opened DRM device.
3608 drm_public drm_intel_bufmgr *
3609 drm_intel_bufmgr_gem_init(int fd, int batch_size)
3611 drm_intel_bufmgr_gem *bufmgr_gem;
3612 struct drm_i915_gem_get_aperture aperture;
3613 drm_i915_getparam_t gp;
3617 pthread_mutex_lock(&bufmgr_list_mutex);
3619 bufmgr_gem = drm_intel_bufmgr_gem_find(fd);
3623 bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
3624 if (bufmgr_gem == NULL)
3627 bufmgr_gem->fd = fd;
3628 atomic_set(&bufmgr_gem->refcount, 1);
3630 if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
3637 ret = drmIoctl(bufmgr_gem->fd,
3638 DRM_IOCTL_I915_GEM_GET_APERTURE,
3642 bufmgr_gem->gtt_size = aperture.aper_available_size;
3644 fprintf(stderr, "DRM_IOCTL_I915_GEM_APERTURE failed: %s\n",
3646 bufmgr_gem->gtt_size = 128 * 1024 * 1024;
3647 fprintf(stderr, "Assuming %dkB available aperture size.\n"
3648 "May lead to reduced performance or incorrect "
3650 (int)bufmgr_gem->gtt_size / 1024);
3653 bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);
3655 if (IS_GEN2(bufmgr_gem->pci_device))
3656 bufmgr_gem->gen = 2;
3657 else if (IS_GEN3(bufmgr_gem->pci_device))
3658 bufmgr_gem->gen = 3;
3659 else if (IS_GEN4(bufmgr_gem->pci_device))
3660 bufmgr_gem->gen = 4;
3661 else if (IS_GEN5(bufmgr_gem->pci_device))
3662 bufmgr_gem->gen = 5;
3663 else if (IS_GEN6(bufmgr_gem->pci_device))
3664 bufmgr_gem->gen = 6;
3665 else if (IS_GEN7(bufmgr_gem->pci_device))
3666 bufmgr_gem->gen = 7;
3667 else if (IS_GEN8(bufmgr_gem->pci_device))
3668 bufmgr_gem->gen = 8;
3669 else if (!intel_get_genx(bufmgr_gem->pci_device, &bufmgr_gem->gen)) {
3675 if (IS_GEN3(bufmgr_gem->pci_device) &&
3676 bufmgr_gem->gtt_size > 256*1024*1024) {
3677 /* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
3678 * be used for tiled blits. To simplify the accounting, just
3679 * subtract the unmappable part (fixed to 256MB on all known
3680 * gen3 devices) if the kernel advertises it. */
3681 bufmgr_gem->gtt_size -= 256*1024*1024;
3687 gp.param = I915_PARAM_HAS_EXECBUF2;
3688 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3692 gp.param = I915_PARAM_HAS_BSD;
3693 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3694 bufmgr_gem->has_bsd = ret == 0;
3696 gp.param = I915_PARAM_HAS_BLT;
3697 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3698 bufmgr_gem->has_blt = ret == 0;
3700 gp.param = I915_PARAM_HAS_RELAXED_FENCING;
3701 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3702 bufmgr_gem->has_relaxed_fencing = ret == 0;
3704 gp.param = I915_PARAM_HAS_EXEC_ASYNC;
3705 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3706 bufmgr_gem->has_exec_async = ret == 0;
3708 bufmgr_gem->bufmgr.bo_alloc_userptr = check_bo_alloc_userptr;
3710 gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
3711 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3712 bufmgr_gem->has_wait_timeout = ret == 0;
3714 gp.param = I915_PARAM_HAS_LLC;
3715 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3717 /* Kernel does not support the HAS_LLC query; fall back to GPU
3718 * generation detection and assume that we have LLC on GEN6/7
3720 bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) ||
3721 IS_GEN7(bufmgr_gem->pci_device));
3723 bufmgr_gem->has_llc = *gp.value;
3725 gp.param = I915_PARAM_HAS_VEBOX;
3726 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3727 bufmgr_gem->has_vebox = (ret == 0) && (*gp.value > 0);
3729 gp.param = I915_PARAM_HAS_EXEC_SOFTPIN;
3730 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3731 if (ret == 0 && *gp.value > 0)
3732 bufmgr_gem->bufmgr.bo_set_softpin_offset = drm_intel_gem_bo_set_softpin_offset;
3734 if (bufmgr_gem->gen < 4) {
3735 gp.param = I915_PARAM_NUM_FENCES_AVAIL;
3736 gp.value = &bufmgr_gem->available_fences;
3737 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3739 fprintf(stderr, "get fences failed: %d [%d]\n", ret,
3741 fprintf(stderr, "param: %d, val: %d\n", gp.param,
3743 bufmgr_gem->available_fences = 0;
3745 /* XXX The kernel reports the total number of fences,
3746 * including any that may be pinned.
3748 * We presume that there will be at least one pinned
3749 * fence for the scanout buffer, but there may be more
3750 * than one scanout and the user may be manually
3751 * pinning buffers. Let's move to execbuffer2 and
3752 * thereby forget the insanity of using fences...
3754 bufmgr_gem->available_fences -= 2;
3755 if (bufmgr_gem->available_fences < 0)
3756 bufmgr_gem->available_fences = 0;
3760 if (bufmgr_gem->gen >= 8) {
3761 gp.param = I915_PARAM_HAS_ALIASING_PPGTT;
3762 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3763 if (ret == 0 && *gp.value == 3)
3764 bufmgr_gem->bufmgr.bo_use_48b_address_range = drm_intel_gem_bo_use_48b_address_range;
3767 /* Let's go with one relocation per every 2 dwords (but round down a bit
3768 * since a power of two will mean an extra page allocation for the reloc
3771 * Every 4 was too few for the blender benchmark.
3773 bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
3775 bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
3776 bufmgr_gem->bufmgr.bo_alloc_for_render =
3777 drm_intel_gem_bo_alloc_for_render;
3778 bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
3779 bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
3780 bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
3781 bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
3782 bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
3783 bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
3784 bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
3785 bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
3786 bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
3787 bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
3788 bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
3789 bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
3790 bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
3791 bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
3792 bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
3793 /* Use the new one if available */
3795 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
3796 bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
3798 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
3799 bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
3800 bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
3801 bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref;
3802 bufmgr_gem->bufmgr.debug = 0;
3803 bufmgr_gem->bufmgr.check_aperture_space =
3804 drm_intel_gem_check_aperture_space;
3805 bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
3806 bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
3807 bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
3808 drm_intel_gem_get_pipe_from_crtc_id;
3809 bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
3811 init_cache_buckets(bufmgr_gem);
3813 DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
3814 bufmgr_gem->vma_max = -1; /* unlimited by default */
3816 DRMLISTADD(&bufmgr_gem->managers, &bufmgr_list);
3819 pthread_mutex_unlock(&bufmgr_list_mutex);
3821 return bufmgr_gem != NULL ? &bufmgr_gem->bufmgr : NULL;
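/*
 * Usage sketch (illustrative): typical bring-up for a client using 16kB
 * batches. drm_fd is a hypothetical already-opened DRM device fd;
 * callers may also want drm_intel_bufmgr_gem_enable_fenced_relocs() to
 * avoid needless fence register allocation.
 *
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(drm_fd, 16384);
 *	if (bufmgr != NULL) {
 *		drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 *		drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "scratch",
 *						      4096, 4096);
 *	}
 */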