1 /**************************************************************************
3 * Copyright © 2007 Red Hat Inc.
4 * Copyright © 2007 Intel Corporation
5 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions
29 **************************************************************************/
31 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
32 * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
33 * Eric Anholt <eric@anholt.net>
34 * Dave Airlie <airlied@linux.ie>
42 #include <xf86atomic.h>
50 #include <sys/ioctl.h>
53 #include <sys/types.h>
56 #include "libdrm_lists.h"
57 #include "intel_bufmgr.h"
58 #include "intel_bufmgr_priv.h"
59 #include "intel_chipset.h"
64 #define DBG(...) do { \
65 if (bufmgr_gem->bufmgr.debug) \
66 fprintf(stderr, __VA_ARGS__); \
69 typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
71 struct drm_intel_gem_bo_bucket {
76 /* Only cache objects up to 64MB. Bigger than that, and the rounding of the
77 * size makes many operations fail that wouldn't otherwise.
79 #define DRM_INTEL_GEM_BO_BUCKETS 14
80 typedef struct _drm_intel_bufmgr_gem {
81 drm_intel_bufmgr bufmgr;
89 struct drm_i915_gem_exec_object *exec_objects;
90 struct drm_i915_gem_exec_object2 *exec2_objects;
91 drm_intel_bo **exec_bos;
95 /** Array of lists of cached gem objects of power-of-two sizes */
96 struct drm_intel_gem_bo_bucket cache_bucket[DRM_INTEL_GEM_BO_BUCKETS];
104 } drm_intel_bufmgr_gem;
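/*
 * Illustrative note (not part of the original code): the buckets are
 * filled in by drm_intel_bufmgr_gem_init() with power-of-two sizes
 * starting at the 4096-byte page size, so cache_bucket[i].size is
 * 4096ul << i.  drm_intel_gem_bo_bucket_for_size() returns the first
 * bucket whose size is >= the requested allocation, or NULL when no
 * bucket is large enough, in which case the BO is not cached for reuse.
 */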
106 #define DRM_INTEL_RELOC_FENCE (1<<0)
108 typedef struct _drm_intel_reloc_target_info {
111 } drm_intel_reloc_target;
113 struct _drm_intel_bo_gem {
121 * Kernel-assigned global name for this object
123 unsigned int global_name;
126 * Index of the buffer within the validation list while preparing a
127 * batchbuffer execution.
132 * Current tiling mode
134 uint32_t tiling_mode;
135 uint32_t swizzle_mode;
139 /** Array passed to the DRM containing relocation information. */
140 struct drm_i915_gem_relocation_entry *relocs;
142 * Array of info structs corresponding to relocs[i].target_handle etc
144 drm_intel_reloc_target *reloc_target_info;
145 /** Number of entries in relocs */
147 /** Mapped address for the buffer, saved across map/unmap cycles */
149 /** GTT virtual address for the buffer, saved across map/unmap cycles */
156 * Boolean of whether this BO and its children have been included in
157 * the current drm_intel_bufmgr_check_aperture_space() total.
159 char included_in_check_aperture;
162 * Boolean of whether this buffer has been used as a relocation
163 * target and had its size accounted for, and thus can't have any
164 * further relocations added to it.
166 char used_as_reloc_target;
169 * Boolean of whether we have encountered an error whilst building the relocation tree.
174 * Boolean of whether this buffer can be re-used
179 * Size in bytes of this buffer and its relocation descendants.
181 * Used to avoid costly tree walking in
182 * drm_intel_bufmgr_check_aperture in the common case.
187 * Number of potential fence registers required by this buffer and its
190 int reloc_tree_fences;
194 drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);
197 drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);
200 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
201 uint32_t * swizzle_mode);
204 drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
207 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
210 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);
212 static void drm_intel_gem_bo_free(drm_intel_bo *bo);
215 drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
216 uint32_t *tiling_mode)
218 unsigned long min_size, max_size;
221 if (*tiling_mode == I915_TILING_NONE)
224 /* 965+ just need multiples of page size for tiling */
225 if (bufmgr_gem->gen >= 4)
226 return ROUND_UP_TO(size, 4096);
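	/*
	 * Worked example (illustrative): on gen3 the power-of-two path below
	 * starts from a 1 MiB minimum, so a 1.5 MiB tiled request is rounded
	 * up to 2 MiB, while anything above max_size is demoted to
	 * I915_TILING_NONE.
	 */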
228 /* Older chips need powers of two, of at least 512k or 1M */
229 if (bufmgr_gem->gen == 3) {
230 min_size = 1024*1024;
231 max_size = 128*1024*1024;
234 max_size = 64*1024*1024;
237 if (size > max_size) {
238 *tiling_mode = I915_TILING_NONE;
242 for (i = min_size; i < size; i <<= 1)
249 * Round a given pitch up to the minimum required for X tiling on a
250 * given chip. We use 512 as the minimum to allow for a later tiling
254 drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
255 unsigned long pitch, uint32_t tiling_mode)
257 unsigned long tile_width;
260 if (tiling_mode == I915_TILING_NONE)
263 if (tiling_mode == I915_TILING_X)
268 /* 965 is flexible */
269 if (bufmgr_gem->gen >= 4)
270 return ROUND_UP_TO(pitch, tile_width);
272 /* Pre-965 needs power of two tile width */
273 for (i = tile_width; i < pitch; i <<= 1)
279 static struct drm_intel_gem_bo_bucket *
280 drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
285 for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
286 struct drm_intel_gem_bo_bucket *bucket =
287 &bufmgr_gem->cache_bucket[i];
288 if (bucket->size >= size) {
297 drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
301 for (i = 0; i < bufmgr_gem->exec_count; i++) {
302 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
303 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
305 if (bo_gem->relocs == NULL) {
306 DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
311 for (j = 0; j < bo_gem->reloc_count; j++) {
312 drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
313 drm_intel_bo_gem *target_gem =
314 (drm_intel_bo_gem *) target_bo;
316 DBG("%2d: %d (%s)@0x%08llx -> "
317 "%d (%s)@0x%08lx + 0x%08x\n",
319 bo_gem->gem_handle, bo_gem->name,
320 (unsigned long long)bo_gem->relocs[j].offset,
321 target_gem->gem_handle,
324 bo_gem->relocs[j].delta);
330 drm_intel_gem_bo_reference(drm_intel_bo *bo)
332 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
334 assert(atomic_read(&bo_gem->refcount) > 0);
335 atomic_inc(&bo_gem->refcount);
339 * Adds the given buffer to the list of buffers to be validated (moved into the
340 * appropriate memory type) with the next batch submission.
342 * If a buffer is validated multiple times in a batch submission, it ends up
343 * with the intersection of the memory type flags and the union of the
347 drm_intel_add_validate_buffer(drm_intel_bo *bo)
349 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
350 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
353 if (bo_gem->validate_index != -1)
356 /* Extend the array of validation entries as necessary. */
357 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
358 int new_size = bufmgr_gem->exec_size * 2;
363 bufmgr_gem->exec_objects =
364 realloc(bufmgr_gem->exec_objects,
365 sizeof(*bufmgr_gem->exec_objects) * new_size);
366 bufmgr_gem->exec_bos =
367 realloc(bufmgr_gem->exec_bos,
368 sizeof(*bufmgr_gem->exec_bos) * new_size);
369 bufmgr_gem->exec_size = new_size;
372 index = bufmgr_gem->exec_count;
373 bo_gem->validate_index = index;
374 /* Fill in array entry */
375 bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
376 bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
377 bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
378 bufmgr_gem->exec_objects[index].alignment = 0;
379 bufmgr_gem->exec_objects[index].offset = 0;
380 bufmgr_gem->exec_bos[index] = bo;
381 bufmgr_gem->exec_count++;
385 drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
387 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
388 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
391 if (bo_gem->validate_index != -1) {
393 bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |=
394 EXEC_OBJECT_NEEDS_FENCE;
398 /* Extend the array of validation entries as necessary. */
399 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
400 int new_size = bufmgr_gem->exec_size * 2;
405 bufmgr_gem->exec2_objects =
406 realloc(bufmgr_gem->exec2_objects,
407 sizeof(*bufmgr_gem->exec2_objects) * new_size);
408 bufmgr_gem->exec_bos =
409 realloc(bufmgr_gem->exec_bos,
410 sizeof(*bufmgr_gem->exec_bos) * new_size);
411 bufmgr_gem->exec_size = new_size;
414 index = bufmgr_gem->exec_count;
415 bo_gem->validate_index = index;
416 /* Fill in array entry */
417 bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
418 bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
419 bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
420 bufmgr_gem->exec2_objects[index].alignment = 0;
421 bufmgr_gem->exec2_objects[index].offset = 0;
422 bufmgr_gem->exec_bos[index] = bo;
423 bufmgr_gem->exec2_objects[index].flags = 0;
424 bufmgr_gem->exec2_objects[index].rsvd1 = 0;
425 bufmgr_gem->exec2_objects[index].rsvd2 = 0;
427 bufmgr_gem->exec2_objects[index].flags |=
428 EXEC_OBJECT_NEEDS_FENCE;
430 bufmgr_gem->exec_count++;
433 #define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
437 drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
438 drm_intel_bo_gem *bo_gem)
442 assert(!bo_gem->used_as_reloc_target);
444 /* The older chipsets are far less flexible in terms of tiling,
445 * and require tiled buffers to be size-aligned in the aperture.
446 * This means that in the worst possible case we will need a hole
447 * twice as large as the object in order for it to fit into the
448 * aperture. Optimal packing is for wimps.
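 *
 * Worked example (illustrative): a 1 MiB tiled buffer on a pre-965 part
 * is therefore charged 2 MiB of aperture space here.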
450 size = bo_gem->bo.size;
451 if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE)
454 bo_gem->reloc_tree_size = size;
458 drm_intel_setup_reloc_list(drm_intel_bo *bo)
460 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
461 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
462 unsigned int max_relocs = bufmgr_gem->max_relocs;
464 if (bo->size / 4 < max_relocs)
465 max_relocs = bo->size / 4;
467 bo_gem->relocs = malloc(max_relocs *
468 sizeof(struct drm_i915_gem_relocation_entry));
469 bo_gem->reloc_target_info = malloc(max_relocs *
470 sizeof(drm_intel_reloc_target));
471 if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
472 bo_gem->has_error = 1;
474 free (bo_gem->relocs);
475 bo_gem->relocs = NULL;
477 free (bo_gem->reloc_target_info);
478 bo_gem->reloc_target_info = NULL;
487 drm_intel_gem_bo_busy(drm_intel_bo *bo)
489 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
490 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
491 struct drm_i915_gem_busy busy;
494 memset(&busy, 0, sizeof(busy));
495 busy.handle = bo_gem->gem_handle;
498 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
499 } while (ret == -1 && errno == EINTR);
501 return (ret == 0 && busy.busy);
505 drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
506 drm_intel_bo_gem *bo_gem, int state)
508 struct drm_i915_gem_madvise madv;
510 madv.handle = bo_gem->gem_handle;
513 ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
515 return madv.retained;
519 drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
521 return drm_intel_gem_bo_madvise_internal
522 ((drm_intel_bufmgr_gem *) bo->bufmgr,
523 (drm_intel_bo_gem *) bo,
527 /* drop the oldest entries that have been purged by the kernel */
529 drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
530 struct drm_intel_gem_bo_bucket *bucket)
532 while (!DRMLISTEMPTY(&bucket->head)) {
533 drm_intel_bo_gem *bo_gem;
535 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
536 bucket->head.next, head);
537 if (drm_intel_gem_bo_madvise_internal
538 (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
541 DRMLISTDEL(&bo_gem->head);
542 drm_intel_gem_bo_free(&bo_gem->bo);
546 static drm_intel_bo *
547 drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
552 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
553 drm_intel_bo_gem *bo_gem;
554 unsigned int page_size = getpagesize();
556 struct drm_intel_gem_bo_bucket *bucket;
557 int alloc_from_cache;
558 unsigned long bo_size;
561 if (flags & BO_ALLOC_FOR_RENDER)
564 /* Round the allocated size up to a power of two number of pages. */
565 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
567 /* If we don't have caching at this size, don't actually round the
570 if (bucket == NULL) {
572 if (bo_size < page_size)
575 bo_size = bucket->size;
578 pthread_mutex_lock(&bufmgr_gem->lock);
579 /* Get a buffer out of the cache if available */
581 alloc_from_cache = 0;
582 if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
584 /* Allocate new render-target BOs from the tail (MRU)
585 * of the list, as it will likely be hot in the GPU
586 * cache and in the aperture for us.
588 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
589 bucket->head.prev, head);
590 DRMLISTDEL(&bo_gem->head);
591 alloc_from_cache = 1;
593 /* For non-render-target BOs (where we're probably
594 * going to map it first thing in order to fill it
595 * with data), check if the last BO in the cache is
596 * unbusy, and only reuse in that case. Otherwise,
597 * allocating a new buffer is probably faster than
598 * waiting for the GPU to finish.
600 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
601 bucket->head.next, head);
602 if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
603 alloc_from_cache = 1;
604 DRMLISTDEL(&bo_gem->head);
608 if (alloc_from_cache) {
609 if (!drm_intel_gem_bo_madvise_internal
610 (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
611 drm_intel_gem_bo_free(&bo_gem->bo);
612 drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
618 pthread_mutex_unlock(&bufmgr_gem->lock);
620 if (!alloc_from_cache) {
621 struct drm_i915_gem_create create;
623 bo_gem = calloc(1, sizeof(*bo_gem));
627 bo_gem->bo.size = bo_size;
628 memset(&create, 0, sizeof(create));
629 create.size = bo_size;
632 ret = ioctl(bufmgr_gem->fd,
633 DRM_IOCTL_I915_GEM_CREATE,
635 } while (ret == -1 && errno == EINTR);
636 bo_gem->gem_handle = create.handle;
637 bo_gem->bo.handle = bo_gem->gem_handle;
642 bo_gem->bo.bufmgr = bufmgr;
646 atomic_set(&bo_gem->refcount, 1);
647 bo_gem->validate_index = -1;
648 bo_gem->reloc_tree_fences = 0;
649 bo_gem->used_as_reloc_target = 0;
650 bo_gem->has_error = 0;
651 bo_gem->tiling_mode = I915_TILING_NONE;
652 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
653 bo_gem->reusable = 1;
655 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
657 DBG("bo_create: buf %d (%s) %ldb\n",
658 bo_gem->gem_handle, bo_gem->name, size);
663 static drm_intel_bo *
664 drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
667 unsigned int alignment)
669 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
670 BO_ALLOC_FOR_RENDER);
673 static drm_intel_bo *
674 drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
677 unsigned int alignment)
679 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0);
682 static drm_intel_bo *
683 drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
684 int x, int y, int cpp, uint32_t *tiling_mode,
685 unsigned long *pitch, unsigned long flags)
687 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
689 unsigned long size, stride, aligned_y = y;
692 /* If we're tiled, our allocations are in 8 or 32-row blocks,
693 * so failure to align our height means that we won't allocate
696 * If we're untiled, we still have to align to 2 rows high
697 * because the data port accesses 2x2 blocks even if the
698 * bottom row isn't to be rendered, so failure to align means
699 * we could walk off the end of the GTT and fault. This is
700 * documented on 965, and may be the case on older chipsets
701 * too so we try to be careful.
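 *
 * Worked example (illustrative): a 100-row Y-tiled surface is padded to
 * ALIGN(100, 32) == 128 rows below, before the pitch and total size are
 * rounded by drm_intel_gem_bo_tile_pitch() and drm_intel_gem_bo_tile_size().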
703 if (*tiling_mode == I915_TILING_NONE)
704 aligned_y = ALIGN(y, 2);
705 else if (*tiling_mode == I915_TILING_X)
706 aligned_y = ALIGN(y, 8);
707 else if (*tiling_mode == I915_TILING_Y)
708 aligned_y = ALIGN(y, 32);
711 stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, *tiling_mode);
712 size = stride * aligned_y;
713 size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
715 bo = drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags);
719 ret = drm_intel_gem_bo_set_tiling(bo, tiling_mode, stride);
721 drm_intel_gem_bo_unreference(bo);
731 * Returns a drm_intel_bo wrapping the given buffer object handle.
733 * This can be used when one application needs to pass a buffer object
737 drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
741 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
742 drm_intel_bo_gem *bo_gem;
744 struct drm_gem_open open_arg;
745 struct drm_i915_gem_get_tiling get_tiling;
747 bo_gem = calloc(1, sizeof(*bo_gem));
751 memset(&open_arg, 0, sizeof(open_arg));
752 open_arg.name = handle;
754 ret = ioctl(bufmgr_gem->fd,
757 } while (ret == -1 && errno == EINTR);
759 fprintf(stderr, "Couldn't reference %s handle 0x%08x: %s\n",
760 name, handle, strerror(errno));
764 bo_gem->bo.size = open_arg.size;
765 bo_gem->bo.offset = 0;
766 bo_gem->bo.virtual = NULL;
767 bo_gem->bo.bufmgr = bufmgr;
769 atomic_set(&bo_gem->refcount, 1);
770 bo_gem->validate_index = -1;
771 bo_gem->gem_handle = open_arg.handle;
772 bo_gem->global_name = handle;
773 bo_gem->reusable = 0;
775 memset(&get_tiling, 0, sizeof(get_tiling));
776 get_tiling.handle = bo_gem->gem_handle;
777 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
779 drm_intel_gem_bo_unreference(&bo_gem->bo);
782 bo_gem->tiling_mode = get_tiling.tiling_mode;
783 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
784 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
786 DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
792 drm_intel_gem_bo_free(drm_intel_bo *bo)
794 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
795 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
796 struct drm_gem_close close;
799 if (bo_gem->mem_virtual)
800 munmap(bo_gem->mem_virtual, bo_gem->bo.size);
801 if (bo_gem->gtt_virtual)
802 munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
804 /* Close this object */
805 memset(&close, 0, sizeof(close));
806 close.handle = bo_gem->gem_handle;
807 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
810 "DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
811 bo_gem->gem_handle, bo_gem->name, strerror(errno));
816 /** Frees all cached buffers significantly older than @time. */
818 drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
822 for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
823 struct drm_intel_gem_bo_bucket *bucket =
824 &bufmgr_gem->cache_bucket[i];
826 while (!DRMLISTEMPTY(&bucket->head)) {
827 drm_intel_bo_gem *bo_gem;
829 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
830 bucket->head.next, head);
831 if (time - bo_gem->free_time <= 1)
834 DRMLISTDEL(&bo_gem->head);
836 drm_intel_gem_bo_free(&bo_gem->bo);
842 drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
844 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
845 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
846 struct drm_intel_gem_bo_bucket *bucket;
847 uint32_t tiling_mode;
850 /* Unreference all the target buffers */
851 for (i = 0; i < bo_gem->reloc_count; i++) {
852 drm_intel_gem_bo_unreference_locked_timed(bo_gem->
853 reloc_target_info[i].bo,
856 bo_gem->reloc_count = 0;
857 bo_gem->used_as_reloc_target = 0;
859 DBG("bo_unreference final: %d (%s)\n",
860 bo_gem->gem_handle, bo_gem->name);
862 /* release memory associated with this object */
863 if (bo_gem->reloc_target_info) {
864 free(bo_gem->reloc_target_info);
865 bo_gem->reloc_target_info = NULL;
867 if (bo_gem->relocs) {
868 free(bo_gem->relocs);
869 bo_gem->relocs = NULL;
872 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
873 /* Put the buffer into our internal cache for reuse if we can. */
874 tiling_mode = I915_TILING_NONE;
875 if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
876 drm_intel_gem_bo_set_tiling(bo, &tiling_mode, 0) == 0 &&
877 drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
878 I915_MADV_DONTNEED)) {
879 bo_gem->free_time = time;
882 bo_gem->validate_index = -1;
884 DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
886 drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time);
888 drm_intel_gem_bo_free(bo);
892 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
895 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
897 assert(atomic_read(&bo_gem->refcount) > 0);
898 if (atomic_dec_and_test(&bo_gem->refcount))
899 drm_intel_gem_bo_unreference_final(bo, time);
902 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
904 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
906 assert(atomic_read(&bo_gem->refcount) > 0);
907 if (atomic_dec_and_test(&bo_gem->refcount)) {
908 drm_intel_bufmgr_gem *bufmgr_gem =
909 (drm_intel_bufmgr_gem *) bo->bufmgr;
910 struct timespec time;
912 clock_gettime(CLOCK_MONOTONIC, &time);
914 pthread_mutex_lock(&bufmgr_gem->lock);
915 drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
916 pthread_mutex_unlock(&bufmgr_gem->lock);
920 static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
922 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
923 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
924 struct drm_i915_gem_set_domain set_domain;
927 pthread_mutex_lock(&bufmgr_gem->lock);
929 /* Allow recursive mapping. Mesa may recursively map buffers with
930 * nested display loops.
932 if (!bo_gem->mem_virtual) {
933 struct drm_i915_gem_mmap mmap_arg;
935 DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
937 memset(&mmap_arg, 0, sizeof(mmap_arg));
938 mmap_arg.handle = bo_gem->gem_handle;
940 mmap_arg.size = bo->size;
942 ret = ioctl(bufmgr_gem->fd,
943 DRM_IOCTL_I915_GEM_MMAP,
945 } while (ret == -1 && errno == EINTR);
949 "%s:%d: Error mapping buffer %d (%s): %s .\n",
950 __FILE__, __LINE__, bo_gem->gem_handle,
951 bo_gem->name, strerror(errno));
952 pthread_mutex_unlock(&bufmgr_gem->lock);
955 bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
957 DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
958 bo_gem->mem_virtual);
959 bo->virtual = bo_gem->mem_virtual;
961 set_domain.handle = bo_gem->gem_handle;
962 set_domain.read_domains = I915_GEM_DOMAIN_CPU;
964 set_domain.write_domain = I915_GEM_DOMAIN_CPU;
966 set_domain.write_domain = 0;
968 ret = ioctl(bufmgr_gem->fd,
969 DRM_IOCTL_I915_GEM_SET_DOMAIN,
971 } while (ret == -1 && errno == EINTR);
974 fprintf(stderr, "%s:%d: Error setting to CPU domain %d: %s\n",
975 __FILE__, __LINE__, bo_gem->gem_handle,
977 pthread_mutex_unlock(&bufmgr_gem->lock);
981 pthread_mutex_unlock(&bufmgr_gem->lock);
986 int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
988 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
989 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
990 struct drm_i915_gem_set_domain set_domain;
993 pthread_mutex_lock(&bufmgr_gem->lock);
995 /* Get a mapping of the buffer if we haven't before. */
996 if (bo_gem->gtt_virtual == NULL) {
997 struct drm_i915_gem_mmap_gtt mmap_arg;
999 DBG("bo_map_gtt: mmap %d (%s)\n", bo_gem->gem_handle,
1002 memset(&mmap_arg, 0, sizeof(mmap_arg));
1003 mmap_arg.handle = bo_gem->gem_handle;
1005 /* Get the fake offset back... */
1007 ret = ioctl(bufmgr_gem->fd,
1008 DRM_IOCTL_I915_GEM_MMAP_GTT,
1010 } while (ret == -1 && errno == EINTR);
1014 "%s:%d: Error preparing buffer map %d (%s): %s .\n",
1016 bo_gem->gem_handle, bo_gem->name,
1018 pthread_mutex_unlock(&bufmgr_gem->lock);
1023 bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
1024 MAP_SHARED, bufmgr_gem->fd,
1026 if (bo_gem->gtt_virtual == MAP_FAILED) {
1027 bo_gem->gtt_virtual = NULL;
1030 "%s:%d: Error mapping buffer %d (%s): %s .\n",
1032 bo_gem->gem_handle, bo_gem->name,
1034 pthread_mutex_unlock(&bufmgr_gem->lock);
1039 bo->virtual = bo_gem->gtt_virtual;
1041 DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1042 bo_gem->gtt_virtual);
1044 /* Now move it to the GTT domain so that the CPU caches are flushed */
1045 set_domain.handle = bo_gem->gem_handle;
1046 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1047 set_domain.write_domain = I915_GEM_DOMAIN_GTT;
1049 ret = ioctl(bufmgr_gem->fd,
1050 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1052 } while (ret == -1 && errno == EINTR);
1056 fprintf(stderr, "%s:%d: Error setting domain %d: %s\n",
1057 __FILE__, __LINE__, bo_gem->gem_handle,
1061 pthread_mutex_unlock(&bufmgr_gem->lock);
1066 int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
1068 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1069 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1075 assert(bo_gem->gtt_virtual != NULL);
1077 pthread_mutex_lock(&bufmgr_gem->lock);
1079 pthread_mutex_unlock(&bufmgr_gem->lock);
1084 static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
1086 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1087 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1088 struct drm_i915_gem_sw_finish sw_finish;
1094 assert(bo_gem->mem_virtual != NULL);
1096 pthread_mutex_lock(&bufmgr_gem->lock);
1098 /* Cause a flush to happen if the buffer's pinned for scanout, so the
1099 * results show up in a timely manner.
1101 sw_finish.handle = bo_gem->gem_handle;
1103 ret = ioctl(bufmgr_gem->fd,
1104 DRM_IOCTL_I915_GEM_SW_FINISH,
1106 } while (ret == -1 && errno == EINTR);
1107 ret = ret == -1 ? -errno : 0;
1110 pthread_mutex_unlock(&bufmgr_gem->lock);
1116 drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
1117 unsigned long size, const void *data)
1119 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1120 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1121 struct drm_i915_gem_pwrite pwrite;
1124 memset(&pwrite, 0, sizeof(pwrite));
1125 pwrite.handle = bo_gem->gem_handle;
1126 pwrite.offset = offset;
1128 pwrite.data_ptr = (uint64_t) (uintptr_t) data;
1130 ret = ioctl(bufmgr_gem->fd,
1131 DRM_IOCTL_I915_GEM_PWRITE,
1133 } while (ret == -1 && errno == EINTR);
1137 "%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
1138 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1139 (int)size, strerror(errno));
1146 drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
1148 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1149 struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
1152 get_pipe_from_crtc_id.crtc_id = crtc_id;
1153 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
1154 &get_pipe_from_crtc_id);
1156 /* We return -1 here to signal that we don't
1157 * know which pipe is associated with this crtc.
1158 * This lets the caller know that this information
1159 * isn't available; using the wrong pipe for
1160 * vblank waiting can cause the chipset to lock up
1165 return get_pipe_from_crtc_id.pipe;
1169 drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
1170 unsigned long size, void *data)
1172 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1173 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1174 struct drm_i915_gem_pread pread;
1177 memset(&pread, 0, sizeof(pread));
1178 pread.handle = bo_gem->gem_handle;
1179 pread.offset = offset;
1181 pread.data_ptr = (uint64_t) (uintptr_t) data;
1183 ret = ioctl(bufmgr_gem->fd,
1184 DRM_IOCTL_I915_GEM_PREAD,
1186 } while (ret == -1 && errno == EINTR);
1190 "%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
1191 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1192 (int)size, strerror(errno));
1198 /** Waits for all GPU rendering to the object to have completed. */
1200 drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
1202 drm_intel_gem_bo_start_gtt_access(bo, 0);
1206 * Sets the object to the GTT read and possibly write domain, used by the X
1207 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
1209 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
1210 * can do tiled pixmaps this way.
1213 drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
1215 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1216 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1217 struct drm_i915_gem_set_domain set_domain;
1220 set_domain.handle = bo_gem->gem_handle;
1221 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1222 set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
1224 ret = ioctl(bufmgr_gem->fd,
1225 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1227 } while (ret == -1 && errno == EINTR);
1230 "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
1231 __FILE__, __LINE__, bo_gem->gem_handle,
1232 set_domain.read_domains, set_domain.write_domain,
1238 drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
1240 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1243 free(bufmgr_gem->exec2_objects);
1244 free(bufmgr_gem->exec_objects);
1245 free(bufmgr_gem->exec_bos);
1247 pthread_mutex_destroy(&bufmgr_gem->lock);
1249 /* Free any cached buffer objects we were going to reuse */
1250 for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
1251 struct drm_intel_gem_bo_bucket *bucket =
1252 &bufmgr_gem->cache_bucket[i];
1253 drm_intel_bo_gem *bo_gem;
1255 while (!DRMLISTEMPTY(&bucket->head)) {
1256 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1257 bucket->head.next, head);
1258 DRMLISTDEL(&bo_gem->head);
1260 drm_intel_gem_bo_free(&bo_gem->bo);
1268 * Adds the target buffer to the validation list and adds the relocation
1269 * to the reloc_buffer's relocation list.
1271 * The relocation entry at the given offset must already contain the
1272 * precomputed relocation value, because the kernel will optimize out
1273 * the relocation entry write when the buffer hasn't moved from the
1274 * last known offset in target_bo.
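 *
 * Illustrative call (caller-side sketch; reloc_offset and the domains are
 * the caller's choice): after writing target_bo's presumed address into
 * the batch at byte offset reloc_offset, record the relocation with
 *
 *	drm_intel_bo_emit_reloc(batch_bo, reloc_offset,
 *				target_bo, 0,
 *				I915_GEM_DOMAIN_RENDER, 0);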
1277 do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1278 drm_intel_bo *target_bo, uint32_t target_offset,
1279 uint32_t read_domains, uint32_t write_domain,
1282 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1283 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1284 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
1286 if (bo_gem->has_error)
1289 if (target_bo_gem->has_error) {
1290 bo_gem->has_error = 1;
1294 if (target_bo_gem->tiling_mode == I915_TILING_NONE)
1297 /* We never use HW fences for rendering on 965+ */
1298 if (bufmgr_gem->gen >= 4)
1301 /* Create a new relocation list if needed */
1302 if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
1305 /* Check overflow */
1306 assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
1309 assert(offset <= bo->size - 4);
1310 assert((write_domain & (write_domain - 1)) == 0);
1312 /* Make sure that we're not adding a reloc to something whose size has
1313 * already been accounted for.
1315 assert(!bo_gem->used_as_reloc_target);
1316 bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
1317 /* An object needing a fence is a tiled buffer, so it won't have
1318 * relocs to other buffers.
1321 target_bo_gem->reloc_tree_fences = 1;
1322 bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
1324 /* Flag the target to disallow further relocations in it. */
1325 target_bo_gem->used_as_reloc_target = 1;
1327 bo_gem->relocs[bo_gem->reloc_count].offset = offset;
1328 bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
1329 bo_gem->relocs[bo_gem->reloc_count].target_handle =
1330 target_bo_gem->gem_handle;
1331 bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
1332 bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
1333 bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
1335 bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
1336 drm_intel_gem_bo_reference(target_bo);
1338 bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
1339 DRM_INTEL_RELOC_FENCE;
1341 bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
1343 bo_gem->reloc_count++;
1349 drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1350 drm_intel_bo *target_bo, uint32_t target_offset,
1351 uint32_t read_domains, uint32_t write_domain)
1353 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
1355 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1356 read_domains, write_domain,
1357 !bufmgr_gem->fenced_relocs);
1361 drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
1362 drm_intel_bo *target_bo,
1363 uint32_t target_offset,
1364 uint32_t read_domains, uint32_t write_domain)
1366 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1367 read_domains, write_domain, 1);
1371 * Walk the tree of relocations rooted at BO and accumulate the list of
1372 * validations to be performed and update the relocation buffers with
1373 * index values into the validation list.
1376 drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
1378 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1381 if (bo_gem->relocs == NULL)
1384 for (i = 0; i < bo_gem->reloc_count; i++) {
1385 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
1387 /* Continue walking the tree depth-first. */
1388 drm_intel_gem_bo_process_reloc(target_bo);
1390 /* Add the target to the validate list */
1391 drm_intel_add_validate_buffer(target_bo);
1396 drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
1398 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1401 if (bo_gem->relocs == NULL)
1404 for (i = 0; i < bo_gem->reloc_count; i++) {
1405 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
1408 /* Continue walking the tree depth-first. */
1409 drm_intel_gem_bo_process_reloc2(target_bo);
1411 need_fence = (bo_gem->reloc_target_info[i].flags &
1412 DRM_INTEL_RELOC_FENCE);
1414 /* Add the target to the validate list */
1415 drm_intel_add_validate_buffer2(target_bo, need_fence);
1421 drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
1425 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1426 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1427 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1429 /* Update the buffer offset */
1430 if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
1431 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1432 bo_gem->gem_handle, bo_gem->name, bo->offset,
1433 (unsigned long long)bufmgr_gem->exec_objects[i].
1435 bo->offset = bufmgr_gem->exec_objects[i].offset;
1441 drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
1445 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1446 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1447 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1449 /* Update the buffer offset */
1450 if (bufmgr_gem->exec2_objects[i].offset != bo->offset) {
1451 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1452 bo_gem->gem_handle, bo_gem->name, bo->offset,
1453 (unsigned long long)bufmgr_gem->exec2_objects[i].offset);
1454 bo->offset = bufmgr_gem->exec2_objects[i].offset;
1460 drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
1461 drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
1463 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1464 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1465 struct drm_i915_gem_execbuffer execbuf;
1468 if (bo_gem->has_error)
1471 pthread_mutex_lock(&bufmgr_gem->lock);
1472 /* Update indices and set up the validate list. */
1473 drm_intel_gem_bo_process_reloc(bo);
1475 /* Add the batch buffer to the validation list. There are no
1476 * relocations pointing to it.
1478 drm_intel_add_validate_buffer(bo);
1480 execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
1481 execbuf.buffer_count = bufmgr_gem->exec_count;
1482 execbuf.batch_start_offset = 0;
1483 execbuf.batch_len = used;
1484 execbuf.cliprects_ptr = (uintptr_t) cliprects;
1485 execbuf.num_cliprects = num_cliprects;
1490 ret = ioctl(bufmgr_gem->fd,
1491 DRM_IOCTL_I915_GEM_EXECBUFFER,
1493 } while (ret != 0 && errno == EINTR);
1497 if (errno == ENOSPC) {
1499 "Execbuffer fails to pin. "
1500 "Estimate: %u. Actual: %u. Available: %u\n",
1501 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
1504 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
1507 (unsigned int)bufmgr_gem->gtt_size);
1510 drm_intel_update_buffer_offsets(bufmgr_gem);
1512 if (bufmgr_gem->bufmgr.debug)
1513 drm_intel_gem_dump_validation_list(bufmgr_gem);
1515 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1516 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1517 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1519 /* Disconnect the buffer from the validate list */
1520 bo_gem->validate_index = -1;
1521 bufmgr_gem->exec_bos[i] = NULL;
1523 bufmgr_gem->exec_count = 0;
1524 pthread_mutex_unlock(&bufmgr_gem->lock);
1530 drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
1531 drm_clip_rect_t *cliprects, int num_cliprects,
1534 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
1535 struct drm_i915_gem_execbuffer2 execbuf;
1538 pthread_mutex_lock(&bufmgr_gem->lock);
1539 /* Update indices and set up the validate list. */
1540 drm_intel_gem_bo_process_reloc2(bo);
1542 /* Add the batch buffer to the validation list. There are no relocations
1545 drm_intel_add_validate_buffer2(bo, 0);
1547 execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
1548 execbuf.buffer_count = bufmgr_gem->exec_count;
1549 execbuf.batch_start_offset = 0;
1550 execbuf.batch_len = used;
1551 execbuf.cliprects_ptr = (uintptr_t)cliprects;
1552 execbuf.num_cliprects = num_cliprects;
1560 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2,
1562 } while (ret != 0 && errno == EINTR);
1566 if (ret == -ENOMEM) {
1568 "Execbuffer fails to pin. "
1569 "Estimate: %u. Actual: %u. Available: %u\n",
1570 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
1571 bufmgr_gem->exec_count),
1572 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
1573 bufmgr_gem->exec_count),
1574 (unsigned int) bufmgr_gem->gtt_size);
1577 drm_intel_update_buffer_offsets2(bufmgr_gem);
1579 if (bufmgr_gem->bufmgr.debug)
1580 drm_intel_gem_dump_validation_list(bufmgr_gem);
1582 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1583 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1584 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1586 /* Disconnect the buffer from the validate list */
1587 bo_gem->validate_index = -1;
1588 bufmgr_gem->exec_bos[i] = NULL;
1590 bufmgr_gem->exec_count = 0;
1591 pthread_mutex_unlock(&bufmgr_gem->lock);
1597 drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
1599 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1600 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1601 struct drm_i915_gem_pin pin;
1604 memset(&pin, 0, sizeof(pin));
1605 pin.handle = bo_gem->gem_handle;
1606 pin.alignment = alignment;
1609 ret = ioctl(bufmgr_gem->fd,
1610 DRM_IOCTL_I915_GEM_PIN,
1612 } while (ret == -1 && errno == EINTR);
1617 bo->offset = pin.offset;
1622 drm_intel_gem_bo_unpin(drm_intel_bo *bo)
1624 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1625 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1626 struct drm_i915_gem_unpin unpin;
1629 memset(&unpin, 0, sizeof(unpin));
1630 unpin.handle = bo_gem->gem_handle;
1632 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
1640 drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
1643 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1644 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1645 struct drm_i915_gem_set_tiling set_tiling;
1648 if (bo_gem->global_name == 0 && *tiling_mode == bo_gem->tiling_mode)
1651 memset(&set_tiling, 0, sizeof(set_tiling));
1652 set_tiling.handle = bo_gem->gem_handle;
1655 set_tiling.tiling_mode = *tiling_mode;
1656 set_tiling.stride = stride;
1658 ret = ioctl(bufmgr_gem->fd,
1659 DRM_IOCTL_I915_GEM_SET_TILING,
1661 } while (ret == -1 && errno == EINTR);
1662 bo_gem->tiling_mode = set_tiling.tiling_mode;
1663 bo_gem->swizzle_mode = set_tiling.swizzle_mode;
1665 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
1667 *tiling_mode = bo_gem->tiling_mode;
1668 return ret == 0 ? 0 : -errno;
1672 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
1673 uint32_t * swizzle_mode)
1675 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1677 *tiling_mode = bo_gem->tiling_mode;
1678 *swizzle_mode = bo_gem->swizzle_mode;
1683 drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
1685 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1686 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1687 struct drm_gem_flink flink;
1690 if (!bo_gem->global_name) {
1691 memset(&flink, 0, sizeof(flink));
1692 flink.handle = bo_gem->gem_handle;
1694 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
1697 bo_gem->global_name = flink.name;
1698 bo_gem->reusable = 0;
1701 *name = bo_gem->global_name;
1706 * Enables unlimited caching of buffer objects for reuse.
1708 * This is potentially very memory expensive, as the cache at each bucket
1709 * size is only bounded by how many buffers of that size we've managed to have
1710 * in flight at once.
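 *
 * Typical setup (illustrative; BATCH_SZ is whatever batch size the caller
 * uses):
 *
 *	bufmgr = drm_intel_bufmgr_gem_init(fd, BATCH_SZ);
 *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);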
1713 drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
1715 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1717 bufmgr_gem->bo_reuse = 1;
1721 * Enable use of fenced reloc type.
1723 * New code should enable this to avoid unnecessary fence register
1724 * allocation. If this option is not enabled, all relocs will have a fence
1725 * register allocated.
1728 drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
1730 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
1732 if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
1733 bufmgr_gem->fenced_relocs = 1;
1737 * Return the additional aperture space required by the tree of buffer objects
1741 drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
1743 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1747 if (bo == NULL || bo_gem->included_in_check_aperture)
1751 bo_gem->included_in_check_aperture = 1;
1753 for (i = 0; i < bo_gem->reloc_count; i++)
1755 drm_intel_gem_bo_get_aperture_space(bo_gem->
1756 reloc_target_info[i].bo);
1762 * Count the number of buffers in this list that need a fence reg
1764 * If the count is greater than the number of available regs, we'll have
1765 * to ask the caller to resubmit a batch with fewer tiled buffers.
1767 * This function over-counts if the same buffer is used multiple times.
1770 drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
1773 unsigned int total = 0;
1775 for (i = 0; i < count; i++) {
1776 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
1781 total += bo_gem->reloc_tree_fences;
1787 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
1788 * for the next drm_intel_bufmgr_check_aperture_space() call.
1791 drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
1793 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1796 if (bo == NULL || !bo_gem->included_in_check_aperture)
1799 bo_gem->included_in_check_aperture = 0;
1801 for (i = 0; i < bo_gem->reloc_count; i++)
1802 drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
1803 reloc_target_info[i].bo);
1807 * Return a conservative estimate for the amount of aperture required
1808 * for a collection of buffers. This may double-count some buffers.
1811 drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
1814 unsigned int total = 0;
1816 for (i = 0; i < count; i++) {
1817 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
1819 total += bo_gem->reloc_tree_size;
1825 * Return the amount of aperture needed for a collection of buffers.
1826 * This avoids double counting any buffers, at the cost of looking
1827 * at every buffer in the set.
1830 drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
1833 unsigned int total = 0;
1835 for (i = 0; i < count; i++) {
1836 total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
1837 /* For the first buffer object in the array, we get an
1838 * accurate count back for its reloc_tree size (since nothing
1839 * had been flagged as being counted yet). We can save that
1840 * value out as a more conservative reloc_tree_size that
1841 * avoids double-counting target buffers. Since the first
1842 * buffer happens to usually be the batch buffer in our
1843 * callers, this can pull us back from doing the tree
1844 * walk on every new batch emit.
1847 drm_intel_bo_gem *bo_gem =
1848 (drm_intel_bo_gem *) bo_array[i];
1849 bo_gem->reloc_tree_size = total;
1853 for (i = 0; i < count; i++)
1854 drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
1859 * Return -1 if the batchbuffer should be flushed before attempting to
1860 * emit rendering referencing the buffers pointed to by bo_array.
1862 * This is required because if we try to emit a batchbuffer with relocations
1863 * to a tree of buffers that won't simultaneously fit in the aperture,
1864 * the rendering will return an error at a point where the software is not
1865 * prepared to recover from it.
1867 * However, we also want to emit the batchbuffer well before we reach the
1868 * limit: if each batchbuffer references buffers covering almost all of the
1869 * aperture, every emit ends up waiting to evict a buffer from the previous
1870 * rendering, and performance becomes effectively synchronous. By emitting
1871 * smaller batchbuffers, we trade some CPU overhead for better parallelism.
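 *
 * Illustrative call pattern (caller-side sketch; flush_batch() stands in
 * for whatever the caller does to submit and restart its batchbuffer):
 *
 *	if (drm_intel_bufmgr_check_aperture_space(bo_array, count) != 0) {
 *		flush_batch();
 *		... then re-emit against a fresh batchbuffer ...
 *	}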
1875 drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
1877 drm_intel_bufmgr_gem *bufmgr_gem =
1878 (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
1879 unsigned int total = 0;
1880 unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
1883 /* Check for fence reg constraints if necessary */
1884 if (bufmgr_gem->available_fences) {
1885 total_fences = drm_intel_gem_total_fences(bo_array, count);
1886 if (total_fences > bufmgr_gem->available_fences)
1890 total = drm_intel_gem_estimate_batch_space(bo_array, count);
1892 if (total > threshold)
1893 total = drm_intel_gem_compute_batch_space(bo_array, count);
1895 if (total > threshold) {
1896 DBG("check_space: overflowed available aperture, "
1898 total / 1024, (int)bufmgr_gem->gtt_size / 1024);
1901 DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024,
1902 (int)bufmgr_gem->gtt_size / 1024);
1908 * Disable buffer reuse for objects which are shared with the kernel
1909 * as scanout buffers
1912 drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
1914 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1916 bo_gem->reusable = 0;
1921 _drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
1923 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1926 for (i = 0; i < bo_gem->reloc_count; i++) {
1927 if (bo_gem->reloc_target_info[i].bo == target_bo)
1929 if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
1937 /** Return true if target_bo is referenced by bo's relocation tree. */
1939 drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
1941 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
1943 if (bo == NULL || target_bo == NULL)
1945 if (target_bo_gem->used_as_reloc_target)
1946 return _drm_intel_gem_bo_references(bo, target_bo);
1951 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
1952 * and manage buffer objects.
1954 * \param fd File descriptor of the opened DRM device.
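 * \param batch_size Size in bytes of the batchbuffers that will be executed
 *	with this bufmgr; it bounds the per-buffer relocation count (see the
 *	max_relocs computation below).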
1957 drm_intel_bufmgr_gem_init(int fd, int batch_size)
1959 drm_intel_bufmgr_gem *bufmgr_gem;
1960 struct drm_i915_gem_get_aperture aperture;
1961 drm_i915_getparam_t gp;
1966 bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
1967 if (bufmgr_gem == NULL)
1970 bufmgr_gem->fd = fd;
1972 if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
1977 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
1980 bufmgr_gem->gtt_size = aperture.aper_available_size;
1982 fprintf(stderr, "DRM_IOCTL_I915_GEM_APERTURE failed: %s\n",
1984 bufmgr_gem->gtt_size = 128 * 1024 * 1024;
1985 fprintf(stderr, "Assuming %dkB available aperture size.\n"
1986 "May lead to reduced performance or incorrect "
1988 (int)bufmgr_gem->gtt_size / 1024);
1991 gp.param = I915_PARAM_CHIPSET_ID;
1992 gp.value = &bufmgr_gem->pci_device;
1993 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
1995 fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
1996 fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
1999 if (IS_GEN2(bufmgr_gem))
2000 bufmgr_gem->gen = 2;
2001 else if (IS_GEN3(bufmgr_gem))
2002 bufmgr_gem->gen = 3;
2003 else if (IS_GEN4(bufmgr_gem))
2004 bufmgr_gem->gen = 4;
2006 bufmgr_gem->gen = 6;
2008 gp.param = I915_PARAM_HAS_EXECBUF2;
2009 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2013 if (bufmgr_gem->gen < 4) {
2014 gp.param = I915_PARAM_NUM_FENCES_AVAIL;
2015 gp.value = &bufmgr_gem->available_fences;
2016 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2018 fprintf(stderr, "get fences failed: %d [%d]\n", ret,
2020 fprintf(stderr, "param: %d, val: %d\n", gp.param,
2022 bufmgr_gem->available_fences = 0;
2024 /* XXX The kernel reports the total number of fences,
2025 * including any that may be pinned.
2027 * We presume that there will be at least one pinned
2028 * fence for the scanout buffer, but there may be more
2029 * than one scanout and the user may be manually
2030 * pinning buffers. Let's move to execbuffer2 and
2031 * thereby forget the insanity of using fences...
2033 bufmgr_gem->available_fences -= 2;
2034 if (bufmgr_gem->available_fences < 0)
2035 bufmgr_gem->available_fences = 0;
2039 /* Let's go with one relocation per every 2 dwords (but round down a bit
2040 * since a power of two will mean an extra page allocation for the reloc
2043 * Every 4 was too few for the blender benchmark.
2045 bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
2047 bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
2048 bufmgr_gem->bufmgr.bo_alloc_for_render =
2049 drm_intel_gem_bo_alloc_for_render;
2050 bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
2051 bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
2052 bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
2053 bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
2054 bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
2055 bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
2056 bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
2057 bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
2058 bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
2059 bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
2060 bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
2061 bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
2062 bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
2063 bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
2064 bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
2065 /* Use the new one if available */
2067 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
2069 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
2070 bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
2071 bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
2072 bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
2073 bufmgr_gem->bufmgr.debug = 0;
2074 bufmgr_gem->bufmgr.check_aperture_space =
2075 drm_intel_gem_check_aperture_space;
2076 bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
2077 bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
2078 drm_intel_gem_get_pipe_from_crtc_id;
2079 bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
2081 /* Initialize the linked lists for BO reuse cache. */
2082 for (i = 0, size = 4096; i < DRM_INTEL_GEM_BO_BUCKETS; i++, size *= 2) {
2083 DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
2084 bufmgr_gem->cache_bucket[i].size = size;
2087 return &bufmgr_gem->bufmgr;