int num_buckets;
time_t time;
+ drmMMListHead named;
+
uint64_t gtt_size;
int available_fences;
int pci_device;
 * Kernel-assigned global name for this object
*/
unsigned int global_name;
+ drmMMListHead name_list;
/**
* Index of the buffer within the validation list while preparing a
drm_intel_gem_bo_free(&bo_gem->bo);
return NULL;
}
+
+ DRMINITLISTHEAD(&bo_gem->name_list);
}
bo_gem->name = name;
int ret;
struct drm_gem_open open_arg;
struct drm_i915_gem_get_tiling get_tiling;
+ drmMMListHead *list;
+
+ /* At the moment most applications only have a few named bo.
+ * For instance, in a DRI client only the render buffers passed
+ * between X and the client are named. And since X returns the
+ * alternating names for the front/back buffer a linear search
+ * provides a sufficiently fast match.
+ */
+ for (list = bufmgr_gem->named.next;
+ list != &bufmgr_gem->named;
+ list = list->next) {
+ bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
+ if (bo_gem->global_name == handle) {
+ drm_intel_gem_bo_reference(&bo_gem->bo);
+ return &bo_gem->bo;
+ }
+ }
bo_gem = calloc(1, sizeof(*bo_gem));
if (!bo_gem)
/* XXX stride is unknown */
drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
+ DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
return &bo_gem->bo;
bo_gem->relocs = NULL;
}
+ DRMLISTDEL(&bo_gem->name_list);
+
bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
/* Put the buffer into our internal cache for reuse if we can. */
if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
return -errno;
bo_gem->global_name = flink.name;
bo_gem->reusable = 0;
+
+ DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
}
*name = bo_gem->global_name;
drm_intel_gem_get_pipe_from_crtc_id;
bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
+ DRMINITLISTHEAD(&bufmgr_gem->named);
init_cache_buckets(bufmgr_gem);
return &bufmgr_gem->bufmgr;