diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c
index 029ca5d..007a6d8 100644
--- a/intel/intel_bufmgr_gem.c
+++ b/intel/intel_bufmgr_gem.c
@@ -149,6 +149,8 @@ struct _drm_intel_bo_gem {
 	/**
 	 * Kenel-assigned global name for this object
+	 *
+	 * List contains both flink named and prime fd'd objects
 	 */
 	unsigned int global_name;
 	drmMMListHead name_list;
@@ -210,6 +212,15 @@ struct _drm_intel_bo_gem {
 	bool reusable;
 
 	/**
+	 * Boolean of whether the GPU is definitely not accessing the buffer.
+	 *
+	 * This is only valid when reusable, since non-reusable
+	 * buffers are those that have been shared with other
+	 * processes, so we don't know their state.
+	 */
+	bool idle;
+
+	/**
 	 * Size in bytes of this buffer and its relocation descendents.
 	 *
 	 * Used to avoid costly tree walking in
@@ -380,7 +391,7 @@ drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
 			    (unsigned long long)bo_gem->relocs[j].offset,
 			    target_gem->gem_handle,
 			    target_gem->name,
-			    target_bo->offset,
+			    target_bo->offset64,
 			    bo_gem->relocs[j].delta);
 		}
 	}
@@ -565,11 +576,19 @@ drm_intel_gem_bo_busy(drm_intel_bo *bo)
 	struct drm_i915_gem_busy busy;
 	int ret;
 
+	if (bo_gem->reusable && bo_gem->idle)
+		return false;
+
 	VG_CLEAR(busy);
 	busy.handle = bo_gem->gem_handle;
 
 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
-
+	if (ret == 0) {
+		bo_gem->idle = !busy.busy;
+		return busy.busy;
+	} else {
+		return false;
+	}
 	return (ret == 0 && busy.busy);
 }
 
@@ -862,10 +881,6 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
 		}
 	}
 
-	bo_gem = calloc(1, sizeof(*bo_gem));
-	if (!bo_gem)
-		return NULL;
-
 	VG_CLEAR(open_arg);
 	open_arg.name = handle;
 	ret = drmIoctl(bufmgr_gem->fd,
@@ -874,11 +889,29 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
 	if (ret != 0) {
 		DBG("Couldn't reference %s handle 0x%08x: %s\n",
 		    name, handle, strerror(errno));
-		free(bo_gem);
 		return NULL;
 	}
 
+	/* Now see if someone has used a prime handle to get this
+	 * object from the kernel before by looking through the list
+	 * again for a matching gem_handle
+	 */
+	for (list = bufmgr_gem->named.next;
+	     list != &bufmgr_gem->named;
+	     list = list->next) {
+		bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
+		if (bo_gem->gem_handle == open_arg.handle) {
+			drm_intel_gem_bo_reference(&bo_gem->bo);
+			return &bo_gem->bo;
+		}
+	}
+
+	bo_gem = calloc(1, sizeof(*bo_gem));
+	if (!bo_gem)
+		return NULL;
+
 	bo_gem->bo.size = open_arg.size;
 	bo_gem->bo.offset = 0;
+	bo_gem->bo.offset64 = 0;
 	bo_gem->bo.virtual = NULL;
 	bo_gem->bo.bufmgr = bufmgr;
 	bo_gem->name = name;
@@ -1322,7 +1355,9 @@ int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
 int drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
 {
 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+#ifdef HAVE_VALGRIND
 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+#endif
 	int ret;
 
 	/* If the CPU cache isn't coherent with the GTT, then use a
@@ -1672,7 +1707,7 @@ do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
 	    target_bo_gem->gem_handle;
 	bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
 	bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
-	bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
+	bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64;
 
 	bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
 	if (target_bo != bo)
@@ -1823,11 +1858,12 @@ drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
 		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 
 		/* Update the buffer offset */
-		if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
+		if (bufmgr_gem->exec_objects[i].offset != bo->offset64) {
 			DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
-			    bo_gem->gem_handle, bo_gem->name, bo->offset,
+			    bo_gem->gem_handle, bo_gem->name, bo->offset64,
 			    (unsigned long long)bufmgr_gem->exec_objects[i].
 			    offset);
+			bo->offset64 = bufmgr_gem->exec_objects[i].offset;
 			bo->offset = bufmgr_gem->exec_objects[i].offset;
 		}
 	}
@@ -1843,10 +1879,11 @@ drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
 		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
 
 		/* Update the buffer offset */
-		if (bufmgr_gem->exec2_objects[i].offset != bo->offset) {
+		if (bufmgr_gem->exec2_objects[i].offset != bo->offset64) {
 			DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
-			    bo_gem->gem_handle, bo_gem->name, bo->offset,
+			    bo_gem->gem_handle, bo_gem->name, bo->offset64,
 			    (unsigned long long)bufmgr_gem->exec2_objects[i].offset);
+			bo->offset64 = bufmgr_gem->exec2_objects[i].offset;
 			bo->offset = bufmgr_gem->exec2_objects[i].offset;
 		}
 	}
@@ -2202,6 +2239,8 @@ drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
 		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
 		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 
+		bo_gem->idle = false;
+
 		/* Disconnect the buffer from the validate list */
 		bo_gem->validate_index = -1;
 		bufmgr_gem->exec_bos[i] = NULL;
@@ -2297,6 +2336,8 @@ skip_execution:
 		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
 		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
 
+		bo_gem->idle = false;
+
 		/* Disconnect the buffer from the validate list */
 		bo_gem->validate_index = -1;
 		bufmgr_gem->exec_bos[i] = NULL;
@@ -2350,6 +2391,7 @@ drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
 	if (ret != 0)
 		return -errno;
 
+	bo->offset64 = pin.offset;
 	bo->offset = pin.offset;
 	return 0;
 }
@@ -2451,8 +2493,25 @@ drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
 	uint32_t handle;
 	drm_intel_bo_gem *bo_gem;
 	struct drm_i915_gem_get_tiling get_tiling;
+	drmMMListHead *list;
 
 	ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
+
+	/*
+	 * See if the kernel has already returned this buffer to us. Just as
+	 * for named buffers, we must not create two bo's pointing at the same
+	 * kernel object
+	 */
+	for (list = bufmgr_gem->named.next;
+	     list != &bufmgr_gem->named;
+	     list = list->next) {
+		bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
+		if (bo_gem->gem_handle == handle) {
+			drm_intel_gem_bo_reference(&bo_gem->bo);
+			return &bo_gem->bo;
+		}
+	}
+
 	if (ret) {
 		fprintf(stderr,"ret is %d %d\n", ret, errno);
 		return NULL;
@@ -2487,8 +2546,8 @@ drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
 	bo_gem->has_error = false;
 	bo_gem->reusable = false;
 
-	DRMINITLISTHEAD(&bo_gem->name_list);
 	DRMINITLISTHEAD(&bo_gem->vma_list);
+	DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
 
 	VG_CLEAR(get_tiling);
 	get_tiling.handle = bo_gem->gem_handle;
@@ -2513,6 +2572,9 @@ drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 
+	if (DRMLISTEMPTY(&bo_gem->name_list))
+		DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
+
 	if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle,
 			       DRM_CLOEXEC, prime_fd) != 0)
 		return -errno;
@@ -2542,7 +2604,8 @@ drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
 		bo_gem->global_name = flink.name;
 		bo_gem->reusable = false;
 
-		DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
+		if (DRMLISTEMPTY(&bo_gem->name_list))
+			DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
 	}
 
 	*name = bo_gem->global_name;
@@ -2982,15 +3045,19 @@ drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
 	drm_intel_context *context = NULL;
 	int ret;
 
+	context = calloc(1, sizeof(*context));
+	if (!context)
+		return NULL;
+
 	VG_CLEAR(create);
 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
 	if (ret != 0) {
 		DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
 		    strerror(errno));
+		free(context);
 		return NULL;
 	}
 
-	context = calloc(1, sizeof(*context));
 	context->ctx_id = create.ctx_id;
 	context->bufmgr = bufmgr;
@@ -3021,6 +3088,40 @@ drm_intel_gem_context_destroy(drm_intel_context *ctx)
 }
 
 int
+drm_intel_get_reset_stats(drm_intel_context *ctx,
+			  uint32_t *reset_count,
+			  uint32_t *active,
+			  uint32_t *pending)
+{
+	drm_intel_bufmgr_gem *bufmgr_gem;
+	struct drm_i915_reset_stats stats;
+	int ret;
+
+	if (ctx == NULL)
+		return -EINVAL;
+
+	memset(&stats, 0, sizeof(stats));
+
+	bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
+	stats.ctx_id = ctx->ctx_id;
+	ret = drmIoctl(bufmgr_gem->fd,
+		       DRM_IOCTL_I915_GET_RESET_STATS,
+		       &stats);
+	if (ret == 0) {
+		if (reset_count != NULL)
+			*reset_count = stats.reset_count;
+
+		if (active != NULL)
+			*active = stats.batch_active;
+
+		if (pending != NULL)
+			*pending = stats.batch_pending;
+	}
+
+	return ret;
+}
+
+int
 drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
 		   uint32_t offset,
 		   uint64_t *result)
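
Notes on the changes above, with short C sketches against libdrm's public intel_bufmgr.h API; helper names, device paths, and sizes that do not appear in the diff are illustrative assumptions.

The new bo_gem->idle flag caches the answer of the last DRM_IOCTL_I915_GEM_BUSY query: once the kernel reports a reusable buffer idle, later drm_intel_bo_busy() calls return from the cached flag without entering the kernel, and every execbuffer submission clears the flag again. A minimal sketch of the polling pattern this optimizes (poll_scratch is hypothetical; bufmgr is assumed to come from drm_intel_bufmgr_gem_init()):

#include <intel_bufmgr.h>

static void poll_scratch(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "scratch", 4096, 4096);

	if (bo == NULL)
		return;

	/* First query issues the busy ioctl; an idle result is latched
	 * in the new bo_gem->idle flag. */
	(void) drm_intel_bo_busy(bo);

	/* Repeat queries on the still-idle, reusable bo are answered
	 * from the cache with no ioctl, until the bo is submitted in
	 * an execbuffer again. */
	(void) drm_intel_bo_busy(bo);

	drm_intel_bo_unreference(bo);
}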
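
The offset to offset64 conversions matter on 32-bit userspace: the legacy unsigned long 'offset' field of drm_intel_bo truncates GPU addresses above 4 GiB, so relocation emission, pinning, and the post-execbuffer offset updaters now read and write the 64-bit offset64 field (added to struct _drm_intel_bo by the matching intel_bufmgr.h change, which is not part of this diff), keeping the old field in sync only for ABI compatibility. A sketch of reading the presumed address (print_presumed_offset is a hypothetical helper; handle and offset64 are public drm_intel_bo members):

#include <inttypes.h>
#include <stdio.h>
#include <intel_bufmgr.h>

static void print_presumed_offset(drm_intel_bo *bo)
{
	/* offset64 is authoritative; bo->offset may be truncated on
	 * 32-bit builds and survives only for older users. */
	printf("bo handle %u at 0x%016" PRIx64 "\n",
	       bo->handle, (uint64_t) bo->offset64);
}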
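
Both import paths, drm_intel_bo_gem_create_from_name() and drm_intel_bo_gem_create_from_prime(), now walk the bufmgr_gem->named list before allocating, and the flink and prime export paths add a buffer to that list at most once, so a single kernel GEM handle can never be wrapped by two independent bo_gem structs whose separate destruction would close the handle out from under the surviving wrapper. A sketch of the aliasing case this guards against (import_twice is hypothetical; prime_fd is assumed to come from some dma-buf exporter):

#include <intel_bufmgr.h>

static void import_twice(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
{
	drm_intel_bo *a = drm_intel_bo_gem_create_from_prime(bufmgr, prime_fd, size);
	drm_intel_bo *b = drm_intel_bo_gem_create_from_prime(bufmgr, prime_fd, size);

	/* With the list walk above, a == b: the second import finds the
	 * existing wrapper by gem_handle and only takes a reference. */
	drm_intel_bo_unreference(b);
	drm_intel_bo_unreference(a);
}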
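
drm_intel_get_reset_stats() is new public API wrapping DRM_IOCTL_I915_GET_RESET_STATS; any of the three counter pointers may be NULL when the caller does not need that value, and the context must be non-NULL. A minimal, self-contained caller (the /dev/dri/card0 node and the 4096-byte batch size passed to drm_intel_bufmgr_gem_init() are assumptions of the sketch):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <intel_bufmgr.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed device node */
	drm_intel_bufmgr *bufmgr;
	drm_intel_context *ctx;
	uint32_t resets, active, pending;

	if (fd < 0)
		return 1;

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	ctx = bufmgr ? drm_intel_gem_context_create(bufmgr) : NULL;

	/* resets is the global GPU reset count; active/pending count this
	 * context's batches that were executing/queued when resets hit. */
	if (ctx && drm_intel_get_reset_stats(ctx, &resets, &active, &pending) == 0)
		printf("resets=%u active=%u pending=%u\n", resets, active, pending);

	if (ctx)
		drm_intel_gem_context_destroy(ctx);
	if (bufmgr)
		drm_intel_bufmgr_destroy(bufmgr);
	close(fd);
	return 0;
}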