/**************************************************************************
 *
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007 Intel Corporation
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
 *          Eric Anholt <eric@anholt.net>
 *          Dave Airlie <airlied@linux.ie>
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <stdint.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <xf86drm.h>
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "i915_drm.h"
#define DBG(...) do {                           \
    if (bufmgr_gem->bufmgr.debug)               \
        fprintf(stderr, __VA_ARGS__);           \
} while (0)
typedef struct _drm_intel_bo_gem drm_intel_bo_gem;

struct drm_intel_gem_bo_bucket {
    drm_intel_bo_gem *head, **tail;
    /**
     * Limit on the number of entries in this bucket.
     *
     * 0 means that caching at this bucket size is disabled.
     * -1 means that there is no limit to caching at this size.
     */
    int max_entries;
    /** Current number of entries in this bucket. */
    int num_entries;
};

/* Arbitrarily chosen; with 16 buckets (indices 0 through 15), the largest
 * size we'll cache for reuse is 1 << 15 pages, or 128MB.
 */
#define DRM_INTEL_GEM_BO_BUCKETS    16
typedef struct _drm_intel_bufmgr_gem {
    drm_intel_bufmgr bufmgr;

    int fd;

    int max_relocs;

    pthread_mutex_t lock;

    struct drm_i915_gem_exec_object *exec_objects;
    drm_intel_bo **exec_bos;
    int exec_size;
    int exec_count;

    /** Array of lists of cached gem objects of power-of-two sizes */
    struct drm_intel_gem_bo_bucket cache_bucket[DRM_INTEL_GEM_BO_BUCKETS];

    uint64_t gtt_size;
} drm_intel_bufmgr_gem;
struct _drm_intel_bo_gem {
    drm_intel_bo bo;

    int refcount;
    /** Boolean whether the mmap ioctl has been called for this buffer yet. */
    int mapped;
    uint32_t gem_handle;
    const char *name;

    /**
     * Kernel-assigned global name for this object
     */
    unsigned int global_name;

    /**
     * Index of the buffer within the validation list while preparing a
     * batchbuffer execution.
     */
    int validate_index;

    /**
     * Boolean whether we've started swrast.
     * Set when the buffer has been mapped into the CPU domain,
     * cleared when the buffer is unmapped or submitted for execution.
     */
    int swrast;

    /** Array passed to the DRM containing relocation information. */
    struct drm_i915_gem_relocation_entry *relocs;
    /** Array of bos corresponding to relocs[i].target_handle */
    drm_intel_bo **reloc_target_bo;
    /** Number of entries in relocs */
    int reloc_count;
    /** Mapped address for the buffer */
    void *virtual;

    /** Link in the per-bucket free list when the buffer is cached. */
    drm_intel_bo_gem *next;

    /**
     * Boolean of whether this BO and its children have been included in
     * the current drm_intel_bufmgr_check_aperture_space() total.
     */
    char included_in_check_aperture;

    /**
     * Boolean of whether this buffer has been used as a relocation
     * target and had its size accounted for, and thus can't have any
     * further relocations added to it.
     */
    char used_as_reloc_target;

    /**
     * Size in bytes of this buffer and its relocation descendants.
     *
     * Used to avoid costly tree walking in drm_intel_bufmgr_check_aperture in
     * the common case.
     */
    int reloc_tree_size;
};

static void drm_intel_gem_bo_reference_locked(drm_intel_bo *bo);
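/* The body of this helper falls outside the excerpted region; this is a
 * minimal reconstruction, assuming it returns ceil(log2(n)) so that
 * "1 << logbase2(size)" in drm_intel_gem_bo_alloc() rounds the size up,
 * as that function's comment states.
 */
static unsigned long
logbase2(unsigned long n)
{
    unsigned long log2 = 0;

    while ((1UL << log2) < n)
        log2++;

    return log2;
}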
static struct drm_intel_gem_bo_bucket *
drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
                                 unsigned long size)
{
    int i;

    /* We only do buckets in power of two increments */
    if ((size & (size - 1)) != 0)
        return NULL;

    /* We should only see sizes rounded to pages. */
    assert((size % 4096) == 0);

    /* We always allocate in units of pages */
    i = ffs(size / 4096) - 1;
    if (i >= DRM_INTEL_GEM_BO_BUCKETS)
        return NULL;

    return &bufmgr_gem->cache_bucket[i];
}
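/*
 * Worked example (illustrative, not in the original file): a 16KB buffer is
 * 4 pages, so i = ffs(16384 / 4096) - 1 = ffs(4) - 1 = 2, the 4-page bucket.
 * A non-power-of-two size such as 12KB fails the (size & (size - 1)) test
 * and gets no bucket, so it is never cached.
 */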
static void drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
{
    int i, j;

    for (i = 0; i < bufmgr_gem->exec_count; i++) {
        drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

        if (bo_gem->relocs == NULL) {
            DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle, bo_gem->name);
            continue;
        }

        for (j = 0; j < bo_gem->reloc_count; j++) {
            drm_intel_bo *target_bo = bo_gem->reloc_target_bo[j];
            drm_intel_bo_gem *target_gem = (drm_intel_bo_gem *)target_bo;

            DBG("%2d: %d (%s)@0x%08llx -> %d (%s)@0x%08lx + 0x%08x\n",
                i,
                bo_gem->gem_handle, bo_gem->name, bo_gem->relocs[j].offset,
                target_gem->gem_handle, target_gem->name, target_bo->offset,
                bo_gem->relocs[j].delta);
        }
    }
}
/**
 * Adds the given buffer to the list of buffers to be validated (moved into the
 * appropriate memory type) with the next batch submission.
 *
 * If a buffer is validated multiple times in a batch submission, it ends up
 * with the intersection of the memory type flags and the union of the
 * access flags.
 */
static void
drm_intel_add_validate_buffer(drm_intel_bo *bo)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    int index;

    if (bo_gem->validate_index != -1)
        return;

    /* Extend the array of validation entries as necessary. */
    if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
        int new_size = bufmgr_gem->exec_size * 2;

        if (new_size == 0)
            new_size = 5;

        bufmgr_gem->exec_objects =
            realloc(bufmgr_gem->exec_objects,
                    sizeof(*bufmgr_gem->exec_objects) * new_size);
        bufmgr_gem->exec_bos =
            realloc(bufmgr_gem->exec_bos,
                    sizeof(*bufmgr_gem->exec_bos) * new_size);
        bufmgr_gem->exec_size = new_size;
    }

    index = bufmgr_gem->exec_count;
    bo_gem->validate_index = index;
    /* Fill in array entry */
    bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
    bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
    bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
    bufmgr_gem->exec_objects[index].alignment = 0;
    bufmgr_gem->exec_objects[index].offset = 0;
    bufmgr_gem->exec_bos[index] = bo;
    drm_intel_gem_bo_reference_locked(bo);
    bufmgr_gem->exec_count++;
}
#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
    sizeof(uint32_t))

static int
drm_intel_setup_reloc_list(drm_intel_bo *bo)
{
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;

    bo_gem->relocs = malloc(bufmgr_gem->max_relocs *
                            sizeof(struct drm_i915_gem_relocation_entry));
    bo_gem->reloc_target_bo = malloc(bufmgr_gem->max_relocs *
                                     sizeof(drm_intel_bo *));

    return 0;
}
static drm_intel_bo *
drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
                       unsigned long size, unsigned int alignment)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
    drm_intel_bo_gem *bo_gem;
    unsigned int page_size = getpagesize();
    int ret;
    struct drm_intel_gem_bo_bucket *bucket;
    int alloc_from_cache = 0;
    unsigned long bo_size;

    /* Round the allocated size up to a power of two number of pages. */
    bo_size = 1 << logbase2(size);
    if (bo_size < page_size)
        bo_size = page_size;
    bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo_size);

    /* If we don't have caching at this size, don't actually round the
     * allocation up.
     */
    if (bucket == NULL || bucket->max_entries == 0) {
        bo_size = size;
        if (bo_size < page_size)
            bo_size = page_size;
    }

    pthread_mutex_lock(&bufmgr_gem->lock);
    /* Get a buffer out of the cache if available.  Only the bucket head is
     * considered, and only if the kernel reports it idle.
     */
    if (bucket != NULL && bucket->num_entries > 0) {
        struct drm_i915_gem_busy busy;

        bo_gem = bucket->head;
        busy.handle = bo_gem->gem_handle;

        ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
        alloc_from_cache = (ret == 0 && busy.busy == 0);

        if (alloc_from_cache) {
            bucket->head = bo_gem->next;
            if (bo_gem->next == NULL)
                bucket->tail = &bucket->head;
            bucket->num_entries--;
        }
    }
    pthread_mutex_unlock(&bufmgr_gem->lock);

    if (!alloc_from_cache) {
        struct drm_i915_gem_create create;

        bo_gem = calloc(1, sizeof(*bo_gem));
        if (!bo_gem)
            return NULL;

        bo_gem->bo.size = bo_size;
        memset(&create, 0, sizeof(create));
        create.size = bo_size;

        ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
        bo_gem->gem_handle = create.handle;
        if (ret != 0) {
            free(bo_gem);
            return NULL;
        }
        bo_gem->bo.bufmgr = bufmgr;
    }

    bo_gem->name = name;
    bo_gem->refcount = 1;
    bo_gem->validate_index = -1;
    bo_gem->reloc_tree_size = bo_gem->bo.size;
    bo_gem->used_as_reloc_target = 0;

    DBG("bo_create: buf %d (%s) %ldb\n",
        bo_gem->gem_handle, bo_gem->name, size);

    return &bo_gem->bo;
}
/**
 * Returns a drm_intel_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
drm_intel_bo *
drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr, const char *name,
                                  unsigned int handle)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
    drm_intel_bo_gem *bo_gem;
    int ret;
    struct drm_gem_open open_arg;

    bo_gem = calloc(1, sizeof(*bo_gem));
    if (!bo_gem)
        return NULL;

    memset(&open_arg, 0, sizeof(open_arg));
    open_arg.name = handle;
    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
    if (ret != 0) {
        fprintf(stderr, "Couldn't reference %s handle 0x%08x: %s\n",
                name, handle, strerror(errno));
        free(bo_gem);
        return NULL;
    }
    bo_gem->bo.size = open_arg.size;
    bo_gem->bo.offset = 0;
    bo_gem->bo.virtual = NULL;
    bo_gem->bo.bufmgr = bufmgr;
    bo_gem->name = name;
    bo_gem->refcount = 1;
    bo_gem->validate_index = -1;
    bo_gem->gem_handle = open_arg.handle;
    bo_gem->global_name = handle;

    DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);

    return &bo_gem->bo;
}
static void
drm_intel_gem_bo_reference(drm_intel_bo *bo)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

    pthread_mutex_lock(&bufmgr_gem->lock);
    bo_gem->refcount++;
    pthread_mutex_unlock(&bufmgr_gem->lock);
}

static void
drm_intel_gem_bo_reference_locked(drm_intel_bo *bo)
{
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

    bo_gem->refcount++;
}
static void
drm_intel_gem_bo_free(drm_intel_bo *bo)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_gem_close close;
    int ret;

    if (bo_gem->mapped)
        munmap(bo_gem->virtual, bo_gem->bo.size);

    /* Close this object */
    close.handle = bo_gem->gem_handle;
    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
    if (ret != 0) {
        fprintf(stderr,
                "DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
                bo_gem->gem_handle, bo_gem->name, strerror(errno));
    }
    free(bo);
}
static void
drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

    if (--bo_gem->refcount == 0) {
        struct drm_intel_gem_bo_bucket *bucket;

        if (bo_gem->relocs != NULL) {
            int i;

            /* Unreference all the target buffers */
            for (i = 0; i < bo_gem->reloc_count; i++)
                drm_intel_gem_bo_unreference_locked(bo_gem->reloc_target_bo[i]);
            free(bo_gem->reloc_target_bo);
            free(bo_gem->relocs);
        }

        DBG("bo_unreference final: %d (%s)\n",
            bo_gem->gem_handle, bo_gem->name);

        bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
        /* Put the buffer into our internal cache for reuse if we can. */
        if (bucket != NULL &&
            (bucket->max_entries == -1 ||
             (bucket->max_entries > 0 &&
              bucket->num_entries < bucket->max_entries)))
        {
            bo_gem->name = NULL;
            bo_gem->validate_index = -1;
            bo_gem->relocs = NULL;
            bo_gem->reloc_target_bo = NULL;
            bo_gem->reloc_count = 0;

            bo_gem->next = NULL;
            *bucket->tail = bo_gem;
            bucket->tail = &bo_gem->next;
            bucket->num_entries++;
        } else {
            drm_intel_gem_bo_free(bo);
        }
    }
}
static void
drm_intel_gem_bo_unreference(drm_intel_bo *bo)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;

    pthread_mutex_lock(&bufmgr_gem->lock);
    drm_intel_gem_bo_unreference_locked(bo);
    pthread_mutex_unlock(&bufmgr_gem->lock);
}
static int
drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_i915_gem_set_domain set_domain;
    int ret;

    pthread_mutex_lock(&bufmgr_gem->lock);

    /* Allow recursive mapping.  Mesa may recursively map buffers with
     * nested display loops.
     */
    if (!bo_gem->mapped) {

        assert(bo->virtual == NULL);

        DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);

        if (bo_gem->virtual == NULL) {
            struct drm_i915_gem_mmap mmap_arg;

            memset(&mmap_arg, 0, sizeof(mmap_arg));
            mmap_arg.handle = bo_gem->gem_handle;
            mmap_arg.offset = 0;
            mmap_arg.size = bo->size;
            ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
            if (ret != 0) {
                fprintf(stderr, "%s:%d: Error mapping buffer %d (%s): %s.\n",
                        __FILE__, __LINE__,
                        bo_gem->gem_handle, bo_gem->name, strerror(errno));
                pthread_mutex_unlock(&bufmgr_gem->lock);
                return ret;
            }
            bo_gem->virtual = (void *)(uintptr_t)mmap_arg.addr_ptr;
        }
        bo->virtual = bo_gem->virtual;
        bo_gem->mapped = 1;
        DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
            bo_gem->virtual);
    }

    /* Move the buffer into the CPU domain so the mapping sees coherent
     * data; skipped if a previous map already did so.
     */
    if (!bo_gem->swrast) {
        set_domain.handle = bo_gem->gem_handle;
        set_domain.read_domains = I915_GEM_DOMAIN_CPU;
        if (write_enable)
            set_domain.write_domain = I915_GEM_DOMAIN_CPU;
        else
            set_domain.write_domain = 0;
        do {
            ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
                        &set_domain);
        } while (ret == -1 && errno == EINTR);
        if (ret != 0) {
            fprintf(stderr, "%s:%d: Error setting CPU domain on %d: %s\n",
                    __FILE__, __LINE__, bo_gem->gem_handle, strerror(errno));
        }
        bo_gem->swrast = 1;
    }

    pthread_mutex_unlock(&bufmgr_gem->lock);

    return 0;
}
static int
drm_intel_gem_bo_unmap(drm_intel_bo *bo)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_i915_gem_sw_finish sw_finish;
    int ret;

    if (bo == NULL)
        return 0;

    assert(bo_gem->mapped);

    pthread_mutex_lock(&bufmgr_gem->lock);
    if (bo_gem->swrast) {
        sw_finish.handle = bo_gem->gem_handle;
        do {
            ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SW_FINISH,
                        &sw_finish);
        } while (ret == -1 && errno == EINTR);
        bo_gem->swrast = 0;
    }
    pthread_mutex_unlock(&bufmgr_gem->lock);
    return 0;
}
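/**
 * Writes the given data into the buffer through the pwrite ioctl, so no
 * CPU mapping of the buffer is required; the call is retried on EINTR.
 */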
static int
drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
                         unsigned long size, const void *data)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_i915_gem_pwrite pwrite;
    int ret;

    memset(&pwrite, 0, sizeof(pwrite));
    pwrite.handle = bo_gem->gem_handle;
    pwrite.offset = offset;
    pwrite.size = size;
    pwrite.data_ptr = (uint64_t)(uintptr_t)data;
    do {
        ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
    } while (ret == -1 && errno == EINTR);
    if (ret != 0) {
        fprintf(stderr, "%s:%d: Error writing data to buffer %d: (%d %d) %s.\n",
                __FILE__, __LINE__,
                bo_gem->gem_handle, (int)offset, (int)size,
                strerror(errno));
    }
    return 0;
}
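/**
 * Reads data back out of the buffer through the pread ioctl; like
 * bo_subdata, it needs no CPU mapping and is retried on EINTR.
 */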
static int
drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
                             unsigned long size, void *data)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_i915_gem_pread pread;
    int ret;

    memset(&pread, 0, sizeof(pread));
    pread.handle = bo_gem->gem_handle;
    pread.offset = offset;
    pread.size = size;
    pread.data_ptr = (uint64_t)(uintptr_t)data;
    do {
        ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
    } while (ret == -1 && errno == EINTR);
    if (ret != 0) {
        fprintf(stderr, "%s:%d: Error reading data from buffer %d: (%d %d) %s.\n",
                __FILE__, __LINE__,
                bo_gem->gem_handle, (int)offset, (int)size,
                strerror(errno));
    }
    return 0;
}
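/**
 * Blocks until any GPU rendering to the buffer has completed, by moving
 * the buffer into the GTT read domain; the set_domain ioctl waits for
 * outstanding rendering before returning.
 */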
static void
drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_i915_gem_set_domain set_domain;
    int ret;

    set_domain.handle = bo_gem->gem_handle;
    set_domain.read_domains = I915_GEM_DOMAIN_GTT;
    set_domain.write_domain = 0;
    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
    if (ret != 0) {
        fprintf(stderr, "%s:%d: Error setting memory domains %d (%08x %08x): %s.\n",
                __FILE__, __LINE__,
                bo_gem->gem_handle, set_domain.read_domains, set_domain.write_domain,
                strerror(errno));
    }
}
static void
drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
    int i;

    free(bufmgr_gem->exec_objects);
    free(bufmgr_gem->exec_bos);

    pthread_mutex_destroy(&bufmgr_gem->lock);

    /* Free any cached buffer objects we were going to reuse */
    for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
        struct drm_intel_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i];
        drm_intel_bo_gem *bo_gem;

        while ((bo_gem = bucket->head) != NULL) {
            bucket->head = bo_gem->next;
            if (bo_gem->next == NULL)
                bucket->tail = &bucket->head;
            bucket->num_entries--;

            drm_intel_gem_bo_free(&bo_gem->bo);
        }
    }

    free(bufmgr);
}
/**
 * Adds the target buffer to the validation list and adds the relocation
 * to the reloc_buffer's relocation list.
 *
 * The relocation entry at the given offset must already contain the
 * precomputed relocation value, because the kernel will optimize out
 * the relocation entry write when the buffer hasn't moved from the
 * last known offset in target_bo.
 */
static int
drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
                            drm_intel_bo *target_bo, uint32_t target_offset,
                            uint32_t read_domains, uint32_t write_domain)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *)target_bo;

    pthread_mutex_lock(&bufmgr_gem->lock);

    /* Create a new relocation list if needed */
    if (bo_gem->relocs == NULL)
        drm_intel_setup_reloc_list(bo);

    /* Check overflow */
    assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);

    /* Check args */
    assert(offset <= bo->size - 4);
    assert((write_domain & (write_domain - 1)) == 0);

    /* Make sure that we're not adding a reloc to something whose size has
     * already been accounted for.
     */
    assert(!bo_gem->used_as_reloc_target);
    bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;

    /* Flag the target to disallow further relocations in it. */
    target_bo_gem->used_as_reloc_target = 1;

    bo_gem->relocs[bo_gem->reloc_count].offset = offset;
    bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
    bo_gem->relocs[bo_gem->reloc_count].target_handle =
        target_bo_gem->gem_handle;
    bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
    bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
    bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;

    bo_gem->reloc_target_bo[bo_gem->reloc_count] = target_bo;
    drm_intel_gem_bo_reference_locked(target_bo);

    bo_gem->reloc_count++;

    pthread_mutex_unlock(&bufmgr_gem->lock);

    return 0;
}
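/*
 * Usage sketch (illustrative, not in the original file): via the public
 * wrapper, the caller writes the presumed address into the batch first,
 * then records the relocation so the kernel can patch it if target_bo moves:
 *
 *     batch[i] = target_bo->offset + target_offset;
 *     drm_intel_bo_emit_reloc(batch_bo, i * 4, target_bo, target_offset,
 *                             I915_GEM_DOMAIN_RENDER, 0);
 */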
/**
 * Walk the tree of relocations rooted at BO and accumulate the list of
 * validations to be performed and update the relocation buffers with
 * index values into the validation list.
 */
static void
drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
{
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    int i;

    if (bo_gem->relocs == NULL)
        return;

    for (i = 0; i < bo_gem->reloc_count; i++) {
        drm_intel_bo *target_bo = bo_gem->reloc_target_bo[i];

        /* Continue walking the tree depth-first. */
        drm_intel_gem_bo_process_reloc(target_bo);

        /* Add the target to the validate list */
        drm_intel_add_validate_buffer(target_bo);
    }
}
static void
drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
{
    int i;

    for (i = 0; i < bufmgr_gem->exec_count; i++) {
        drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

        /* Update the buffer offset */
        if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
            DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
                bo_gem->gem_handle, bo_gem->name, bo->offset,
                bufmgr_gem->exec_objects[i].offset);
            bo->offset = bufmgr_gem->exec_objects[i].offset;
        }
    }
}
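/**
 * Submits the batchbuffer to the kernel: flattens the relocation tree into
 * the validate list, invokes the execbuffer ioctl (retrying on EAGAIN),
 * writes back the offsets the kernel chose, and empties the validate list.
 */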
static int
drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
                      drm_clip_rect_t *cliprects, int num_cliprects,
                      int DR4)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    struct drm_i915_gem_execbuffer execbuf;
    int ret, i;

    pthread_mutex_lock(&bufmgr_gem->lock);
    /* Update indices and set up the validate list. */
    drm_intel_gem_bo_process_reloc(bo);

    /* Add the batch buffer to the validation list.  There are no relocations
     * pointing to it.
     */
    drm_intel_add_validate_buffer(bo);

    execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec_objects;
    execbuf.buffer_count = bufmgr_gem->exec_count;
    execbuf.batch_start_offset = 0;
    execbuf.batch_len = used;
    execbuf.cliprects_ptr = (uintptr_t)cliprects;
    execbuf.num_cliprects = num_cliprects;
    execbuf.DR1 = 0;
    execbuf.DR4 = DR4;

    do {
        ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuf);
    } while (ret != 0 && errno == EAGAIN);

    drm_intel_update_buffer_offsets(bufmgr_gem);

    if (bufmgr_gem->bufmgr.debug)
        drm_intel_gem_dump_validation_list(bufmgr_gem);

    for (i = 0; i < bufmgr_gem->exec_count; i++) {
        drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

        /* Need to call swrast on next bo_map */
        bo_gem->swrast = 0;

        /* Disconnect the buffer from the validate list */
        bo_gem->validate_index = -1;
        drm_intel_gem_bo_unreference_locked(bo);
        bufmgr_gem->exec_bos[i] = NULL;
    }
    bufmgr_gem->exec_count = 0;
    pthread_mutex_unlock(&bufmgr_gem->lock);

    return 0;
}
static int
drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_i915_gem_pin pin;
    int ret;

    pin.handle = bo_gem->gem_handle;
    pin.alignment = alignment;

    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PIN, &pin);
    if (ret != 0)
        return -errno;

    bo->offset = pin.offset;
    return 0;
}

static int
drm_intel_gem_bo_unpin(drm_intel_bo *bo)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_i915_gem_unpin unpin;
    int ret;

    unpin.handle = bo_gem->gem_handle;

    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
    if (ret != 0)
        return -errno;

    return 0;
}
static int
drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
                            uint32_t stride)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_i915_gem_set_tiling set_tiling;
    int ret;

    set_tiling.handle = bo_gem->gem_handle;
    set_tiling.tiling_mode = *tiling_mode;
    set_tiling.stride = stride;

    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
    if (ret != 0) {
        *tiling_mode = I915_TILING_NONE;
        return -errno;
    }

    *tiling_mode = set_tiling.tiling_mode;
    return 0;
}

static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
                            uint32_t *swizzle_mode)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_i915_gem_get_tiling get_tiling;
    int ret;

    get_tiling.handle = bo_gem->gem_handle;

    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
    if (ret != 0) {
        *tiling_mode = I915_TILING_NONE;
        *swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
        return -errno;
    }

    *tiling_mode = get_tiling.tiling_mode;
    *swizzle_mode = get_tiling.swizzle_mode;
    return 0;
}
static int
drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t *name)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_gem_flink flink;
    int ret;

    if (!bo_gem->global_name) {
        flink.handle = bo_gem->gem_handle;

        ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
        if (ret != 0)
            return -errno;
        bo_gem->global_name = flink.name;
    }

    *name = bo_gem->global_name;
    return 0;
}
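/*
 * Sharing sketch (illustrative, not in the original file): flink in one
 * process pairs with create_from_name in another.
 *
 *     uint32_t name;
 *     drm_intel_bo_flink(bo, &name);            // exporter publishes name
 *     ...pass name over IPC...
 *     shared = drm_intel_bo_gem_create_from_name(bufmgr, "shared", name);
 */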
/**
 * Enables unlimited caching of buffer objects for reuse.
 *
 * This is potentially very memory expensive, as the cache at each bucket
 * size is only bounded by how many buffers of that size we've managed to have
 * in flight at once.
 */
void
drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
    int i;

    for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
        bufmgr_gem->cache_bucket[i].max_entries = -1;
    }
}
/**
 * Return the additional aperture space required by the tree of buffer objects
 * rooted at bo.
 */
static int
drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
{
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    int i;
    int total = 0;

    if (bo == NULL || bo_gem->included_in_check_aperture)
        return 0;

    total += bo->size;
    bo_gem->included_in_check_aperture = 1;

    for (i = 0; i < bo_gem->reloc_count; i++)
        total += drm_intel_gem_bo_get_aperture_space(bo_gem->reloc_target_bo[i]);

    return total;
}
/**
 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
 * for the next drm_intel_bufmgr_check_aperture_space() call.
 */
static void
drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
{
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    int i;

    if (bo == NULL || !bo_gem->included_in_check_aperture)
        return;

    bo_gem->included_in_check_aperture = 0;

    for (i = 0; i < bo_gem->reloc_count; i++)
        drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->reloc_target_bo[i]);
}
/**
 * Return -1 if the batchbuffer should be flushed before attempting to
 * emit rendering referencing the buffers pointed to by bo_array.
 *
 * This is required because if we try to emit a batchbuffer with relocations
 * to a tree of buffers that won't simultaneously fit in the aperture,
 * the rendering will return an error at a point where the software is not
 * prepared to recover from it.
 *
 * However, we also want to emit the batchbuffer significantly before we reach
 * the limit, as a series of batchbuffers each of which references buffers
 * covering almost all of the aperture means that at each emit we end up
 * waiting to evict a buffer from the last rendering, and we get synchronous
 * performance.  By emitting smaller batchbuffers, we eat some CPU overhead to
 * get better parallelism.
 */
static int
drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo_array[0]->bufmgr;
    unsigned int total = 0;
    unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
    int i;

    /* First, a cheap check using the cached per-buffer totals. */
    for (i = 0; i < count; i++) {
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo_array[i];
        if (bo_gem != NULL)
            total += bo_gem->reloc_tree_size;
    }

    /* If that exceeds the threshold, recount exactly, walking the trees
     * and counting shared buffers only once.
     */
    if (total > threshold) {
        total = 0;
        for (i = 0; i < count; i++)
            total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);

        for (i = 0; i < count; i++)
            drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
    }

    if (total > bufmgr_gem->gtt_size * 3 / 4) {
        DBG("check_space: overflowed available aperture, %dkb vs %dkb\n",
            total / 1024, (int)bufmgr_gem->gtt_size / 1024);
        return -1;
    } else {
        DBG("drm_check_space: total %dkb vs bufmgr %dkb\n", total / 1024,
            (int)bufmgr_gem->gtt_size / 1024);
        return 0;
    }
}
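/*
 * Caller pattern (illustrative; intel_batchbuffer_flush is a hypothetical
 * client helper, not part of this file): check before emitting state that
 * references new buffers, and flush first when they no longer fit.
 *
 *     if (drm_intel_bufmgr_check_aperture_space(bo_array, count) != 0)
 *         intel_batchbuffer_flush(batch);
 */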
/**
 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
 * and manage buffer objects.
 *
 * \param fd File descriptor of the opened DRM device.
 * \param batch_size Assumed size of client batchbuffers, used to size the
 *        per-buffer relocation lists.
 */
drm_intel_bufmgr *
drm_intel_bufmgr_gem_init(int fd, int batch_size)
{
    drm_intel_bufmgr_gem *bufmgr_gem;
    struct drm_i915_gem_get_aperture aperture;
    int ret, i;

    bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
    bufmgr_gem->fd = fd;

    if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
        free(bufmgr_gem);
        return NULL;
    }

    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);

    if (ret == 0)
        bufmgr_gem->gtt_size = aperture.aper_available_size;
    else {
        fprintf(stderr, "DRM_IOCTL_I915_GEM_GET_APERTURE failed: %s\n",
                strerror(errno));
        bufmgr_gem->gtt_size = 128 * 1024 * 1024;
        fprintf(stderr, "Assuming %dkB available aperture size.\n"
                "May lead to reduced performance or incorrect rendering.\n",
                (int)bufmgr_gem->gtt_size / 1024);
    }

    /* Let's go with one relocation per every 2 dwords (but round down a bit
     * since a power of two will mean an extra page allocation for the reloc
     * buffer).
     *
     * Every 4 was too few for the blender benchmark.
     */
    bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;

    bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
    bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
    bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
    bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
    bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
    bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
    bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
    bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
    bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
    bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
    bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
    bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
    bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
    bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
    bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
    bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
    bufmgr_gem->bufmgr.debug = 0;
    bufmgr_gem->bufmgr.check_aperture_space = drm_intel_gem_check_aperture_space;
    /* Initialize the linked lists for BO reuse cache. */
    for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++)
        bufmgr_gem->cache_bucket[i].tail = &bufmgr_gem->cache_bucket[i].head;

    return &bufmgr_gem->bufmgr;
}
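#if 0
/*
 * Minimal usage sketch (not part of the original file), driving this
 * manager through the public intel_bufmgr API:
 */
static void
example_usage(int fd)
{
    drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
    drm_intel_bo *bo;

    drm_intel_bufmgr_gem_enable_reuse(bufmgr);

    bo = drm_intel_bo_alloc(bufmgr, "example", 4096, 4096);
    drm_intel_bo_map(bo, 1);            /* writable CPU mapping */
    memset(bo->virtual, 0, 4096);
    drm_intel_bo_unmap(bo);

    drm_intel_bo_unreference(bo);       /* may return the BO to the cache */
    drm_intel_bufmgr_destroy(bufmgr);
}
#endif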