#ifndef ETIME
#define ETIME ETIMEDOUT
#endif
+#include "libdrm.h"
#include "libdrm_lists.h"
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
typedef struct _drm_intel_bufmgr_gem {
drm_intel_bufmgr bufmgr;
+ atomic_t refcount;
+
int fd;
int max_relocs;
int num_buckets;
time_t time;
+ drmMMListHead managers;
+
drmMMListHead named;
drmMMListHead vma_cache;
int vma_count, vma_open, vma_max;
/**
* Kernel-assigned global name for this object
+ *
+ * List contains both flink named and prime fd'd objects
*/
unsigned int global_name;
drmMMListHead name_list;
void *mem_virtual;
/** GTT virtual address for the buffer, saved across map/unmap cycles */
void *gtt_virtual;
+ /**
+ * Virtual address of the buffer allocated by user, used for userptr
+ * objects only.
+ */
+ void *user_virtual;
int map_count;
drmMMListHead vma_list;
bool reusable;
/**
+ * Boolean of whether the GPU is definitely not accessing the buffer.
+ *
+ * This is only valid when reusable, since non-reusable
+ * buffers are those that have been shared with other
+ * processes, so we don't know their state.
+ */
+ bool idle;
+
+ /**
+ * Boolean of whether this buffer was allocated with userptr
+ */
+ bool is_userptr;
+
+ /**
* Size in bytes of this buffer and its relocation descendants.
*
* Used to avoid costly tree walking in
(unsigned long long)bo_gem->relocs[j].offset,
target_gem->gem_handle,
target_gem->name,
- target_bo->offset,
+ target_bo->offset64,
bo_gem->relocs[j].delta);
}
}
struct drm_i915_gem_busy busy;
int ret;
+ if (bo_gem->reusable && bo_gem->idle)
+ return false;
+
VG_CLEAR(busy);
busy.handle = bo_gem->gem_handle;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
-
+ if (ret == 0) {
+ bo_gem->idle = !busy.busy;
+ return busy.busy;
+ } else {
+ return false;
+ }
- return (ret == 0 && busy.busy);
}
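With the cached flag in place, a reusable buffer answers repeated busy queries without re-entering the kernel once it has been seen idle. A minimal sketch of a caller polling through the public wrapper drm_intel_bo_busy() (wait_until_idle() and the polling loop itself are illustrative, not part of this patch):

#include <sched.h>
#include "intel_bufmgr.h"

/* Busy-wait until the GPU is done with bo; after the first query that
 * observes the buffer idle, the cached flag answers without an ioctl. */
static void wait_until_idle(drm_intel_bo *bo)
{
	while (drm_intel_bo_busy(bo))
		sched_yield();
}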
tiling, stride);
}
+static drm_intel_bo *
+drm_intel_gem_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ void *addr,
+ uint32_t tiling_mode,
+ uint32_t stride,
+ unsigned long size,
+ unsigned long flags)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
+ drm_intel_bo_gem *bo_gem;
+ int ret;
+ struct drm_i915_gem_userptr userptr;
+
+ /* Tiling with userptr surfaces is not supported
+ * on all hardware so refuse it for the time being.
+ */
+ if (tiling_mode != I915_TILING_NONE)
+ return NULL;
+
+ bo_gem = calloc(1, sizeof(*bo_gem));
+ if (!bo_gem)
+ return NULL;
+
+ bo_gem->bo.size = size;
+
+ VG_CLEAR(userptr);
+ userptr.user_ptr = (__u64)((unsigned long)addr);
+ userptr.user_size = size;
+ userptr.flags = flags;
+
+ ret = drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_USERPTR,
+ &userptr);
+ if (ret != 0) {
+ DBG("bo_create_userptr: "
+ "ioctl failed with user ptr %p size 0x%lx, "
+ "user flags 0x%lx\n", addr, size, flags);
+ free(bo_gem);
+ return NULL;
+ }
+
+ bo_gem->gem_handle = userptr.handle;
+ bo_gem->bo.handle = bo_gem->gem_handle;
+ bo_gem->bo.bufmgr = bufmgr;
+ bo_gem->is_userptr = true;
+ bo_gem->bo.virtual = addr;
+ /* Save the address provided by the user */
+ bo_gem->user_virtual = addr;
+ bo_gem->tiling_mode = I915_TILING_NONE;
+ bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ bo_gem->stride = 0;
+
+ DRMINITLISTHEAD(&bo_gem->name_list);
+ DRMINITLISTHEAD(&bo_gem->vma_list);
+
+ bo_gem->name = name;
+ atomic_set(&bo_gem->refcount, 1);
+ bo_gem->validate_index = -1;
+ bo_gem->reloc_tree_fences = 0;
+ bo_gem->used_as_reloc_target = false;
+ bo_gem->has_error = false;
+ bo_gem->reusable = false;
+
+ drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
+
+ DBG("bo_create_userptr: "
+ "ptr %p buf %d (%s) size %ldb, stride 0x%x, tile mode %d\n",
+ addr, bo_gem->gem_handle, bo_gem->name,
+ size, stride, tiling_mode);
+
+ return &bo_gem->bo;
+}
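As a usage sketch (assuming the public wrapper drm_intel_bo_alloc_userptr() in intel_bufmgr.h dispatches to this hook; wrap_user_memory() is illustrative, and the memory must stay valid until the bo is released):

#include <stdlib.h>
#include <unistd.h>
#include "i915_drm.h"
#include "intel_bufmgr.h"

static drm_intel_bo *
wrap_user_memory(drm_intel_bufmgr *bufmgr, unsigned long size)
{
	void *ptr;
	long pgsz = sysconf(_SC_PAGESIZE);

	/* The kernel requires a page-aligned range of whole pages. */
	if (posix_memalign(&ptr, pgsz, (size + pgsz - 1) & ~(pgsz - 1)))
		return NULL;

	/* Only I915_TILING_NONE is accepted, per the check above. */
	return drm_intel_bo_alloc_userptr(bufmgr, "user-backed", ptr,
					  I915_TILING_NONE, 0, size, 0);
}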
+
/**
* Returns a drm_intel_bo wrapping the given buffer object handle.
*
* This can be used when one application needs to pass a buffer object
* to another.
*/
-drm_intel_bo *
+drm_public drm_intel_bo *
drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned int handle)
* alternating names for the front/back buffer, a linear search
* provides a sufficiently fast match.
*/
+ pthread_mutex_lock(&bufmgr_gem->lock);
for (list = bufmgr_gem->named.next;
list != &bufmgr_gem->named;
list = list->next) {
bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
if (bo_gem->global_name == handle) {
drm_intel_gem_bo_reference(&bo_gem->bo);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
return &bo_gem->bo;
}
}
- bo_gem = calloc(1, sizeof(*bo_gem));
- if (!bo_gem)
- return NULL;
-
VG_CLEAR(open_arg);
open_arg.name = handle;
ret = drmIoctl(bufmgr_gem->fd,
if (ret != 0) {
DBG("Couldn't reference %s handle 0x%08x: %s\n",
name, handle, strerror(errno));
- free(bo_gem);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ return NULL;
+ }
+ /* Now see if someone has used a prime handle to get this
+ * object from the kernel before by looking through the list
+ * again for a matching gem_handle
+ */
+ for (list = bufmgr_gem->named.next;
+ list != &bufmgr_gem->named;
+ list = list->next) {
+ bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
+ if (bo_gem->gem_handle == open_arg.handle) {
+ drm_intel_gem_bo_reference(&bo_gem->bo);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ return &bo_gem->bo;
+ }
+ }
+
+ bo_gem = calloc(1, sizeof(*bo_gem));
+ if (!bo_gem) {
+ pthread_mutex_unlock(&bufmgr_gem->lock);
return NULL;
}
+
bo_gem->bo.size = open_arg.size;
bo_gem->bo.offset = 0;
+ bo_gem->bo.offset64 = 0;
bo_gem->bo.virtual = NULL;
bo_gem->bo.bufmgr = bufmgr;
bo_gem->name = name;
&get_tiling);
if (ret != 0) {
drm_intel_gem_bo_unreference(&bo_gem->bo);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
return NULL;
}
bo_gem->tiling_mode = get_tiling.tiling_mode;
DRMINITLISTHEAD(&bo_gem->vma_list);
DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
return &bo_gem->bo;
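With the two list walks above, repeated opens of one flink name (or of a name and a prime fd that alias the same kernel object) hand back a single reference-counted bo. A cross-process sketch; publish(), lookup(), and send_name_to_peer() are stand-ins for application code and transport:

#include <stdint.h>
#include "intel_bufmgr.h"

/* Producer: publish a global name for the buffer. */
static void publish(drm_intel_bo *bo, void (*send_name_to_peer)(uint32_t))
{
	uint32_t name;

	if (drm_intel_bo_flink(bo, &name) == 0)
		send_name_to_peer(name);
}

/* Consumer: open it; a second lookup of the same name returns the
 * same bo with its refcount bumped, not a duplicate wrapper. */
static drm_intel_bo *
lookup(drm_intel_bufmgr *bufmgr, uint32_t name)
{
	return drm_intel_bo_gem_create_from_name(bufmgr, "shared", name);
}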
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
assert(atomic_read(&bo_gem->refcount) > 0);
- if (atomic_dec_and_test(&bo_gem->refcount)) {
+
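+ /* Take the mutex only when this might be the last reference:
+ * libdrm's atomic_add_unless() returns true when the counter
+ * already equals the "unless" value (here 1) and leaves it
+ * untouched, so dropping a non-final reference stays lock-free.
+ */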
+ if (atomic_add_unless(&bo_gem->refcount, -1, 1)) {
drm_intel_bufmgr_gem *bufmgr_gem =
(drm_intel_bufmgr_gem *) bo->bufmgr;
struct timespec time;
clock_gettime(CLOCK_MONOTONIC, &time);
pthread_mutex_lock(&bufmgr_gem->lock);
- drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
- drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
+
+ if (atomic_dec_and_test(&bo_gem->refcount)) {
+ drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
+ drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
+ }
+
pthread_mutex_unlock(&bufmgr_gem->lock);
}
}
struct drm_i915_gem_set_domain set_domain;
int ret;
+ if (bo_gem->is_userptr) {
+ /* Return the same user ptr */
+ bo->virtual = bo_gem->user_virtual;
+ return 0;
+ }
+
pthread_mutex_lock(&bufmgr_gem->lock);
if (bo_gem->map_count++ == 0)
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
int ret;
+ if (bo_gem->is_userptr)
+ return -EINVAL;
+
if (bo_gem->map_count++ == 0)
drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
return 0;
}
-int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
+drm_public int
+drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
* undefined).
*/
-int drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
+drm_public int
+drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+#ifdef HAVE_VALGRIND
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+#endif
int ret;
/* If the CPU cache isn't coherent with the GTT, then use a
static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
{
- drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bufmgr_gem *bufmgr_gem;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
int ret = 0;
if (bo == NULL)
return 0;
+ if (bo_gem->is_userptr)
+ return 0;
+
+ bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+
pthread_mutex_lock(&bufmgr_gem->lock);
if (bo_gem->map_count <= 0) {
return ret;
}
-int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
+drm_public int
+drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
{
return drm_intel_gem_bo_unmap(bo);
}
struct drm_i915_gem_pwrite pwrite;
int ret;
+ if (bo_gem->is_userptr)
+ return -EINVAL;
+
VG_CLEAR(pwrite);
pwrite.handle = bo_gem->gem_handle;
pwrite.offset = offset;
struct drm_i915_gem_pread pread;
int ret;
+ if (bo_gem->is_userptr)
+ return -EINVAL;
+
VG_CLEAR(pread);
pread.handle = bo_gem->gem_handle;
pread.offset = offset;
* handle. Userspace must make sure this race does not occur if such precision
* is important.
*/
-int drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
+drm_public int
+drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
* In combination with drm_intel_gem_bo_pin() and manual fence management, we
* can do tiled pixmaps this way.
*/
-void
+drm_public void
drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
target_bo_gem->gem_handle;
bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
- bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
+ bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64;
bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
if (target_bo != bo)
read_domains, write_domain, true);
}
-int
+drm_public int
drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
* Any further drm_intel_bufmgr_check_aperture_space() queries
* involving this buffer in the tree are undefined after this call.
*/
-void
+drm_public void
drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
int i;
struct timespec time;
clock_gettime(CLOCK_MONOTONIC, &time);
assert(bo_gem->reloc_count >= start);
+
/* Unreference the cleared target buffers */
+ pthread_mutex_lock(&bufmgr_gem->lock);
+
for (i = start; i < bo_gem->reloc_count; i++) {
drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->reloc_target_info[i].bo;
if (&target_bo_gem->bo != bo) {
}
}
bo_gem->reloc_count = start;
+
+ pthread_mutex_unlock(&bufmgr_gem->lock);
}
/**
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
/* Update the buffer offset */
- if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
+ if (bufmgr_gem->exec_objects[i].offset != bo->offset64) {
DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
- bo_gem->gem_handle, bo_gem->name, bo->offset,
+ bo_gem->gem_handle, bo_gem->name, bo->offset64,
(unsigned long long)bufmgr_gem->exec_objects[i].
offset);
+ bo->offset64 = bufmgr_gem->exec_objects[i].offset;
bo->offset = bufmgr_gem->exec_objects[i].offset;
}
}
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
/* Update the buffer offset */
- if (bufmgr_gem->exec2_objects[i].offset != bo->offset) {
+ if (bufmgr_gem->exec2_objects[i].offset != bo->offset64) {
DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
- bo_gem->gem_handle, bo_gem->name, bo->offset,
+ bo_gem->gem_handle, bo_gem->name, bo->offset64,
(unsigned long long)bufmgr_gem->exec2_objects[i].offset);
+ bo->offset64 = bufmgr_gem->exec2_objects[i].offset;
bo->offset = bufmgr_gem->exec2_objects[i].offset;
}
}
bufmgr_gem->aub_offset += 4096;
}
-void
+drm_public void
drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
int x1, int y1, int width, int height,
enum aub_dump_bmp_format format,
drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ bo_gem->idle = false;
+
/* Disconnect the buffer from the validate list */
bo_gem->validate_index = -1;
bufmgr_gem->exec_bos[i] = NULL;
drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
+ bo_gem->idle = false;
+
/* Disconnect the buffer from the validate list */
bo_gem->validate_index = -1;
bufmgr_gem->exec_bos[i] = NULL;
flags);
}
-int
+drm_public int
drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
int used, unsigned int flags)
{
if (ret != 0)
return -errno;
+ bo->offset64 = pin.offset;
bo->offset = pin.offset;
return 0;
}
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
int ret;
+ /* Tiling with userptr surfaces is not supported
+ * on all hardware so refuse it for the time being.
+ */
+ if (bo_gem->is_userptr)
+ return -EINVAL;
+
/* Linear buffers have no stride. By ensuring that we only ever use
* stride 0 with linear buffers, we simplify our code.
*/
return 0;
}
-drm_intel_bo *
+drm_public drm_intel_bo *
drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
uint32_t handle;
drm_intel_bo_gem *bo_gem;
struct drm_i915_gem_get_tiling get_tiling;
+ drmMMListHead *list;
+ pthread_mutex_lock(&bufmgr_gem->lock);
ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
if (ret) {
fprintf(stderr,"ret is %d %d\n", ret, errno);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
return NULL;
}
+
+ /*
+ * See if the kernel has already returned this buffer to us. Just as
+ * for named buffers, we must not create two bo's pointing at the same
+ * kernel object
+ */
+ for (list = bufmgr_gem->named.next;
+ list != &bufmgr_gem->named;
+ list = list->next) {
+ bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
+ if (bo_gem->gem_handle == handle) {
+ drm_intel_gem_bo_reference(&bo_gem->bo);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ return &bo_gem->bo;
+ }
+ }
bo_gem = calloc(1, sizeof(*bo_gem));
- if (!bo_gem)
+ if (!bo_gem) {
+ pthread_mutex_unlock(&bufmgr_gem->lock);
return NULL;
-
+ }
/* Determine size of bo. The fd-to-handle ioctl really should
* return the size, but it doesn't. If we have kernel 3.12 or
* later, we can lseek on the prime fd to get the size. Older
bo_gem->has_error = false;
bo_gem->reusable = false;
- DRMINITLISTHEAD(&bo_gem->name_list);
DRMINITLISTHEAD(&bo_gem->vma_list);
+ DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
VG_CLEAR(get_tiling);
get_tiling.handle = bo_gem->gem_handle;
return &bo_gem->bo;
}
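A same-process round trip shows the effect of the named-list check (a sketch; prime_round_trip() is illustrative, assumes a bo from this bufmgr, and elides error paths):

#include <assert.h>
#include <unistd.h>
#include "intel_bufmgr.h"

static void prime_round_trip(drm_intel_bufmgr *bufmgr, drm_intel_bo *bo)
{
	int fd = -1;
	drm_intel_bo *imported;

	/* Exporting also places the bo on the named list... */
	if (drm_intel_bo_gem_export_to_prime(bo, &fd) != 0)
		return;

	/* ...so importing the fd finds it and returns the same bo
	 * with an extra reference instead of a second wrapper. */
	imported = drm_intel_bo_gem_create_from_prime(bufmgr, fd,
						      (int)bo->size);
	assert(imported == bo);

	drm_intel_bo_unreference(imported);
	close(fd);
}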
-int
+drm_public int
drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ pthread_mutex_lock(&bufmgr_gem->lock);
+ if (DRMLISTEMPTY(&bo_gem->name_list))
+ DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+
if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle,
DRM_CLOEXEC, prime_fd) != 0)
return -errno;
VG_CLEAR(flink);
flink.handle = bo_gem->gem_handle;
+ pthread_mutex_lock(&bufmgr_gem->lock);
+
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
- if (ret != 0)
+ if (ret != 0) {
+ pthread_mutex_unlock(&bufmgr_gem->lock);
return -errno;
+ }
bo_gem->global_name = flink.name;
bo_gem->reusable = false;
- DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
+ if (DRMLISTEMPTY(&bo_gem->name_list))
+ DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
}
*name = bo_gem->global_name;
* size is only bounded by how many buffers of that size we've managed to have
* in flight at once.
*/
-void
+drm_public void
drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
* allocation. If this option is not enabled, all relocs will have a fence
* register allocated.
*/
-void
+drm_public void
drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
}
}
-void
+drm_public void
drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
return devid;
}
-int
+drm_public int
drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
* This function has to be called before drm_intel_bufmgr_gem_set_aub_dump()
* for it to have any effect.
*/
-void
+drm_public void
drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
const char *filename)
{
* You can set up a GTT and upload your objects into the referenced
* space, then send off batchbuffers and get BMPs out the other end.
*/
-void
+drm_public void
drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
}
}
-drm_intel_context *
+drm_public drm_intel_context *
drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
drm_intel_context *context = NULL;
int ret;
+ context = calloc(1, sizeof(*context));
+ if (!context)
+ return NULL;
+
VG_CLEAR(create);
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
if (ret != 0) {
DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
strerror(errno));
+ free(context);
return NULL;
}
- context = calloc(1, sizeof(*context));
context->ctx_id = create.ctx_id;
context->bufmgr = bufmgr;
return context;
}
-void
+drm_public void
drm_intel_gem_context_destroy(drm_intel_context *ctx)
{
drm_intel_bufmgr_gem *bufmgr_gem;
free(ctx);
}
-int
+drm_public int
+drm_intel_get_reset_stats(drm_intel_context *ctx,
+ uint32_t *reset_count,
+ uint32_t *active,
+ uint32_t *pending)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem;
+ struct drm_i915_reset_stats stats;
+ int ret;
+
+ if (ctx == NULL)
+ return -EINVAL;
+
+ memset(&stats, 0, sizeof(stats));
+
+ bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
+ stats.ctx_id = ctx->ctx_id;
+ ret = drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GET_RESET_STATS,
+ &stats);
+ if (ret == 0) {
+ if (reset_count != NULL)
+ *reset_count = stats.reset_count;
+
+ if (active != NULL)
+ *active = stats.batch_active;
+
+ if (pending != NULL)
+ *pending = stats.batch_pending;
+ }
+
+ return ret;
+}
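A caller interested in robustness might poll this after a failed execbuf; a sketch with ctx obtained from drm_intel_gem_context_create() (report_hangs() is illustrative; batch_active roughly means this context was blamed for a hang, batch_pending that it was an innocent victim):

#include <stdio.h>
#include <stdint.h>
#include "intel_bufmgr.h"

static void report_hangs(drm_intel_context *ctx)
{
	uint32_t reset_count, active, pending;

	if (drm_intel_get_reset_stats(ctx, &reset_count,
				      &active, &pending) != 0)
		return;	/* kernel without RESET_STATS, or bad ctx */

	if (active)
		fprintf(stderr, "a batch from this context hung the GPU\n");
	else if (pending)
		fprintf(stderr, "a hang elsewhere discarded queued batches\n");
}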
+
+drm_public int
drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
uint32_t offset,
uint64_t *result)
* default state (no annotations), call this function with a \c count
* of zero.
*/
-void
+drm_public void
drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
drm_intel_aub_annotation *annotations,
unsigned count)
bo_gem->aub_annotation_count = count;
}
+static pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER;
+static drmMMListHead bufmgr_list = { &bufmgr_list, &bufmgr_list };
+
+static drm_intel_bufmgr_gem *
+drm_intel_bufmgr_gem_find(int fd)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem;
+
+ DRMLISTFOREACHENTRY(bufmgr_gem, &bufmgr_list, managers) {
+ if (bufmgr_gem->fd == fd) {
+ atomic_inc(&bufmgr_gem->refcount);
+ return bufmgr_gem;
+ }
+ }
+
+ return NULL;
+}
+
+static void
+drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+
+ if (atomic_add_unless(&bufmgr_gem->refcount, -1, 1)) {
+ pthread_mutex_lock(&bufmgr_list_mutex);
+
+ if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
+ DRMLISTDEL(&bufmgr_gem->managers);
+ drm_intel_bufmgr_gem_destroy(bufmgr);
+ }
+
+ pthread_mutex_unlock(&bufmgr_list_mutex);
+ }
+}
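The effect on callers: two independent users of the same device fd now share one manager, and each can balance its init with a destroy, which is routed to the unref above. A minimal sketch (shared_manager_example() is illustrative):

#include <assert.h>
#include "intel_bufmgr.h"

static void shared_manager_example(int fd)
{
	drm_intel_bufmgr *a = drm_intel_bufmgr_gem_init(fd, 4096);
	drm_intel_bufmgr *b = drm_intel_bufmgr_gem_init(fd, 4096);

	assert(a == b);			/* one manager per fd */

	drm_intel_bufmgr_destroy(b);	/* refcount 2 -> 1, nothing freed */
	drm_intel_bufmgr_destroy(a);	/* last reference: real teardown */
}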
+
+static bool
+has_userptr(drm_intel_bufmgr_gem *bufmgr_gem)
+{
+ int ret;
+ void *ptr;
+ long pgsz;
+ struct drm_i915_gem_userptr userptr;
+ struct drm_gem_close close_bo;
+
+ pgsz = sysconf(_SC_PAGESIZE);
+ assert(pgsz > 0);
+
+ ret = posix_memalign(&ptr, pgsz, pgsz);
+ if (ret) {
+ DBG("Failed to get a page (%ld) for userptr detection!\n",
+ pgsz);
+ return false;
+ }
+
+ memset(&userptr, 0, sizeof(userptr));
+ userptr.user_ptr = (__u64)(unsigned long)ptr;
+ userptr.user_size = pgsz;
+
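+ /* Kernels built without MMU-notifier support refuse a synchronized
+ * mapping with ENODEV; retry once asking for an unsynchronized one.
+ */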
+retry:
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
+ if (ret) {
+ if (errno == ENODEV && userptr.flags == 0) {
+ userptr.flags = I915_USERPTR_UNSYNCHRONIZED;
+ goto retry;
+ }
+ free(ptr);
+ return false;
+ }
+
+ close_bo.handle = userptr.handle;
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
+ free(ptr);
+ if (ret) {
+ fprintf(stderr, "Failed to release test userptr object! (%d) "
+ "i915 kernel driver may not be sane!\n", errno);
+ return false;
+ }
+
+ return true;
+}
+
/**
* Initializes the GEM buffer manager, which uses the kernel to allocate, map,
* and manage buffer objects.
*
* \param fd File descriptor of the opened DRM device.
*/
-drm_intel_bufmgr *
+drm_public drm_intel_bufmgr *
drm_intel_bufmgr_gem_init(int fd, int batch_size)
{
drm_intel_bufmgr_gem *bufmgr_gem;
int ret, tmp;
bool exec2 = false;
+ pthread_mutex_lock(&bufmgr_list_mutex);
+
+ bufmgr_gem = drm_intel_bufmgr_gem_find(fd);
+ if (bufmgr_gem)
+ goto exit;
+
bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
if (bufmgr_gem == NULL)
- return NULL;
+ goto exit;
bufmgr_gem->fd = fd;
+ atomic_set(&bufmgr_gem->refcount, 1);
if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
free(bufmgr_gem);
- return NULL;
+ bufmgr_gem = NULL;
+ goto exit;
}
ret = drmIoctl(bufmgr_gem->fd,
bufmgr_gem->gen = 8;
else {
free(bufmgr_gem);
- return NULL;
+ bufmgr_gem = NULL;
+ goto exit;
}
if (IS_GEN3(bufmgr_gem->pci_device) &&
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
bufmgr_gem->has_relaxed_fencing = ret == 0;
+ if (has_userptr(bufmgr_gem))
+ bufmgr_gem->bufmgr.bo_alloc_userptr =
+ drm_intel_gem_bo_alloc_userptr;
+
gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
bufmgr_gem->has_wait_timeout = ret == 0;
bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
- bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
+ bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref;
bufmgr_gem->bufmgr.debug = 0;
bufmgr_gem->bufmgr.check_aperture_space =
drm_intel_gem_check_aperture_space;
DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
bufmgr_gem->vma_max = -1; /* unlimited by default */
- return &bufmgr_gem->bufmgr;
+ DRMLISTADD(&bufmgr_gem->managers, &bufmgr_list);
+
+exit:
+ pthread_mutex_unlock(&bufmgr_list_mutex);
+
+ return bufmgr_gem != NULL ? &bufmgr_gem->bufmgr : NULL;
}
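For completeness, the basic lifecycle against this manager, end to end (a sketch: the device node and sizes are illustrative and error handling is elided):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include "intel_bufmgr.h"

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR);	/* illustrative node */
	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	drm_intel_bo *bo;

	drm_intel_bufmgr_gem_enable_reuse(bufmgr);

	bo = drm_intel_bo_alloc(bufmgr, "scratch", 4096, 4096);
	drm_intel_bo_map(bo, 1);		/* writable CPU mapping */
	memset(bo->virtual, 0, 4096);
	drm_intel_bo_unmap(bo);

	drm_intel_bo_unreference(bo);
	drm_intel_bufmgr_destroy(bufmgr);
	close(fd);
	return 0;
}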