if (bo) {
assert(iris_bo_is_external(bo));
- assert(!bo->reusable);
+ assert(!bo->real.reusable);
/* Being non-reusable, the BO cannot be in the cache lists, but it
* may be in the zombie list if it had reached zero references, but
if (!bo)
return NULL;
- list_inithead(&bo->exports);
+ list_inithead(&bo->real.exports);
bo->hash = _mesa_hash_pointer(bo);
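Every rename in this patch follows one pattern: fields that only make sense
for a real, kernel-allocated GEM object move into an embedded struct, while
fields shared with other BO kinds stay at the top level. A minimal sketch of
the assumed layout, with the field list inferred from the accesses in this
patch and the exact names/types illustrative:

   struct iris_bo {
      uint32_t gem_handle;
      uint64_t address;
      uint64_t size;
      struct iris_bufmgr *bufmgr;
      const char *name;
      /* ...other shared fields: refcount, index, idle, hash... */

      struct {
         void *map;                 /* CPU mapping, if mapped */
         enum iris_mmap_mode mmap_mode;
         uint64_t kflags;           /* EXEC_OBJECT_* execbuf flags */
         uint32_t global_name;      /* flink name, if flinked */
         time_t free_time;          /* when it entered the reuse cache */
         struct list_head exports;  /* per-device export handles */
         bool local;                /* lives in device-local memory */
         bool reusable, imported, exported, userptr;
      } real;
   };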
static void
bo_unmap(struct iris_bo *bo)
{
- VG_NOACCESS(bo->map, bo->size);
- os_munmap(bo->map, bo->size);
- bo->map = NULL;
+ VG_NOACCESS(bo->real.map, bo->size);
+ os_munmap(bo->real.map, bo->size);
+ bo->real.map = NULL;
}
static struct iris_bo *
/* Find one that's got the right mapping type. We used to swap maps
* around but the kernel doesn't allow this on discrete GPUs.
*/
- if (mmap_mode != cur->mmap_mode)
+ if (mmap_mode != cur->real.mmap_mode)
continue;
/* Try a little harder to find one that's already in the right memzone */
bo->bufmgr = bufmgr;
bo->size = bo_size;
bo->idle = true;
- bo->local = local;
+ bo->real.local = local;
if (bufmgr->vram.size == 0) {
/* Calling set_domain() will allocate pages for the BO outside of the
bo->name = name;
p_atomic_set(&bo->refcount, 1);
- bo->reusable = bucket && bufmgr->bo_reuse;
+ bo->real.reusable = bucket && bufmgr->bo_reuse;
bo->index = -1;
- bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
+ bo->real.kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
/* By default, capture all driver-internal buffers like shader kernels,
* surface states, dynamic states, border colors, and so on.
*/
if (memzone < IRIS_MEMZONE_OTHER)
- bo->kflags |= EXEC_OBJECT_CAPTURE;
+ bo->real.kflags |= EXEC_OBJECT_CAPTURE;
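The memzone test works because the driver-internal zones sort below the
general-purpose one in the memzone enum. A rough, illustrative sketch of that
ordering (the driver's real enum has more zones than shown):

   enum iris_memzone {
      IRIS_MEMZONE_SHADER,
      IRIS_MEMZONE_BINDER,
      IRIS_MEMZONE_SURFACE,
      IRIS_MEMZONE_DYNAMIC,
      IRIS_MEMZONE_OTHER,   /* client buffer data: not captured by default */
   };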
- assert(bo->map == NULL || bo->mmap_mode == mmap_mode);
- bo->mmap_mode = mmap_mode;
+ assert(bo->real.map == NULL || bo->real.mmap_mode == mmap_mode);
+ bo->real.mmap_mode = mmap_mode;
/* On integrated GPUs, enable snooping to ensure coherency if needed.
* For discrete, we instead use SMEM and avoid WB maps for coherency.
if (intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg) != 0)
goto err_free;
- bo->reusable = false;
+ bo->real.reusable = false;
}
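For reference, the arg passed to DRM_IOCTL_I915_GEM_SET_CACHING above is
presumably the standard i915 uapi request (from the drm uapi headers):

   struct drm_i915_gem_caching arg = {
      .handle = bo->gem_handle,
      .caching = I915_CACHING_CACHED,  /* snooped: coherent with CPU cache */
   };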
DBG("bo_create: buf %d (%s) (%s memzone) (%s) %llub\n", bo->gem_handle,
- bo->name, memzone_name(memzone), bo->local ? "local" : "system",
+ bo->name, memzone_name(memzone), bo->real.local ? "local" : "system",
(unsigned long long) size);
return bo;
bo->name = name;
bo->size = size;
- bo->map = ptr;
+ bo->real.map = ptr;
bo->bufmgr = bufmgr;
- bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
+ bo->real.kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
simple_mtx_lock(&bufmgr->lock);
bo->address = vma_alloc(bufmgr, memzone, size, 1);
goto err_close;
p_atomic_set(&bo->refcount, 1);
- bo->userptr = true;
+ bo->real.userptr = true;
bo->index = -1;
bo->idle = true;
- bo->mmap_mode = IRIS_MMAP_WB;
+ bo->real.mmap_mode = IRIS_MMAP_WB;
return bo;
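The GEM handle being wrapped here comes from the userptr ioctl; the call made
earlier in this function presumably follows the standard i915 uapi shape
(sketch; the err_free label is hypothetical):

   struct drm_i915_gem_userptr arg = {
      .user_ptr = (uintptr_t) ptr,
      .user_size = size,
   };
   if (intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
      goto err_free;
   bo->gem_handle = arg.handle;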
bo->bufmgr = bufmgr;
bo->gem_handle = open_arg.handle;
bo->name = name;
- bo->global_name = handle;
- bo->reusable = false;
- bo->imported = true;
- bo->mmap_mode = IRIS_MMAP_NONE;
- bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
+ bo->real.global_name = handle;
+ bo->real.reusable = false;
+ bo->real.imported = true;
+ bo->real.mmap_mode = IRIS_MMAP_NONE;
+ bo->real.kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
bo->address = vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 1);
_mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
- _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
+ _mesa_hash_table_insert(bufmgr->name_table, &bo->real.global_name, bo);
DBG("bo_create_from_handle: %d (%s)\n", handle, bo->name);
if (iris_bo_is_external(bo)) {
struct hash_entry *entry;
- if (bo->global_name) {
- entry = _mesa_hash_table_search(bufmgr->name_table, &bo->global_name);
+ if (bo->real.global_name) {
+ entry = _mesa_hash_table_search(bufmgr->name_table,
+ &bo->real.global_name);
_mesa_hash_table_remove(bufmgr->name_table, entry);
}
entry = _mesa_hash_table_search(bufmgr->handle_table, &bo->gem_handle);
_mesa_hash_table_remove(bufmgr->handle_table, entry);
- list_for_each_entry_safe(struct bo_export, export, &bo->exports, link) {
+ list_for_each_entry_safe(struct bo_export, export, &bo->real.exports,
+ link) {
struct drm_gem_close close = { .handle = export->gem_handle };
intel_ioctl(export->drm_fd, DRM_IOCTL_GEM_CLOSE, &close);
free(export);
}
} else {
- assert(list_is_empty(&bo->exports));
+ assert(list_is_empty(&bo->real.exports));
}
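The per-device export records torn down above presumably look like this
(fields inferred from the accesses in this patch):

   struct bo_export {
      int drm_fd;              /* device the BO was exported to */
      uint32_t gem_handle;     /* GEM handle valid on that drm_fd */
      struct list_head link;   /* chained on bo->real.exports */
   };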
/* Close this object */
{
struct iris_bufmgr *bufmgr = bo->bufmgr;
- if (!bo->userptr && bo->map)
+ if (!bo->real.userptr && bo->real.map)
bo_unmap(bo);
if (bo->idle) {
struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];
list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
- if (time - bo->free_time <= 1)
+ if (time - bo->real.free_time <= 1)
break;
list_del(&bo->head);
struct bo_cache_bucket *bucket = &bufmgr->local_cache_bucket[i];
list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
- if (time - bo->free_time <= 1)
+ if (time - bo->real.free_time <= 1)
break;
list_del(&bo->head);
DBG("bo_unreference final: %d (%s)\n", bo->gem_handle, bo->name);
bucket = NULL;
- if (bo->reusable)
- bucket = bucket_for_size(bufmgr, bo->size, bo->local);
+ if (bo->real.reusable)
+ bucket = bucket_for_size(bufmgr, bo->size, bo->real.local);
/* Put the buffer into our internal cache for reuse if we can. */
if (bucket && iris_bo_madvise(bo, I915_MADV_DONTNEED)) {
- bo->free_time = time;
+ bo->real.free_time = time;
bo->name = NULL;
list_addtail(&bo->head, &bucket->head);
struct iris_bufmgr *bufmgr = bo->bufmgr;
assert(bufmgr->vram.size == 0);
- assert(bo->mmap_mode == IRIS_MMAP_WB || bo->mmap_mode == IRIS_MMAP_WC);
+ assert(bo->real.mmap_mode == IRIS_MMAP_WB ||
+ bo->real.mmap_mode == IRIS_MMAP_WC);
struct drm_i915_gem_mmap mmap_arg = {
.handle = bo->gem_handle,
.size = bo->size,
- .flags = bo->mmap_mode == IRIS_MMAP_WC ? I915_MMAP_WC : 0,
+ .flags = bo->real.mmap_mode == IRIS_MMAP_WC ? I915_MMAP_WC : 0,
};
int ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
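On success the kernel writes the new CPU address into the same struct; the
follow-up presumably reads it back along these lines (sketch):

   if (ret != 0)
      return NULL;
   void *map = (void *) (uintptr_t) mmap_arg.addr_ptr;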
* across PCIe, it's always snooped. The only caching mode allowed by
* DG1 hardware for LMEM is WC.
*/
- if (bo->local)
- assert(bo->mmap_mode == IRIS_MMAP_WC);
+ if (bo->real.local)
+ assert(bo->real.mmap_mode == IRIS_MMAP_WC);
else
- assert(bo->mmap_mode == IRIS_MMAP_WB);
+ assert(bo->real.mmap_mode == IRIS_MMAP_WB);
mmap_arg.flags = I915_MMAP_OFFSET_FIXED;
} else {
[IRIS_MMAP_WC] = I915_MMAP_OFFSET_WC,
[IRIS_MMAP_WB] = I915_MMAP_OFFSET_WB,
};
- assert(bo->mmap_mode != IRIS_MMAP_NONE);
- assert(bo->mmap_mode < ARRAY_SIZE(mmap_offset_for_mode));
- mmap_arg.flags = mmap_offset_for_mode[bo->mmap_mode];
+ assert(bo->real.mmap_mode != IRIS_MMAP_NONE);
+ assert(bo->real.mmap_mode < ARRAY_SIZE(mmap_offset_for_mode));
+ mmap_arg.flags = mmap_offset_for_mode[bo->real.mmap_mode];
}
/* Get the fake offset back */
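With mmap_arg being a struct drm_i915_gem_mmap_offset built with the flags
chosen above, the elided sequence presumably follows the standard uapi shape:
the ioctl returns a "fake offset" that selects this BO when mapping the
device node (sketch; error handling simplified):

   if (intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &mmap_arg))
      return NULL;
   void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    bufmgr->fd, mmap_arg.offset);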
{
struct iris_bufmgr *bufmgr = bo->bufmgr;
- assert(bo->mmap_mode != IRIS_MMAP_NONE);
- if (bo->mmap_mode == IRIS_MMAP_NONE)
+ assert(bo->real.mmap_mode != IRIS_MMAP_NONE);
+ if (bo->real.mmap_mode == IRIS_MMAP_NONE)
return NULL;
- if (!bo->map) {
+ if (!bo->real.map) {
DBG("iris_bo_map: %d (%s)\n", bo->gem_handle, bo->name);
void *map = bufmgr->has_mmap_offset ? iris_bo_gem_mmap_offset(dbg, bo)
: iris_bo_gem_mmap_legacy(dbg, bo);
VG_DEFINED(map, bo->size);
- if (p_atomic_cmpxchg(&bo->map, NULL, map)) {
+ if (p_atomic_cmpxchg(&bo->real.map, NULL, map)) {
VG_NOACCESS(map, bo->size);
os_munmap(map, bo->size);
}
}
- assert(bo->map);
+ assert(bo->real.map);
- DBG("iris_bo_map: %d (%s) -> %p\n", bo->gem_handle, bo->name, bo->map);
+ DBG("iris_bo_map: %d (%s) -> %p\n",
+ bo->gem_handle, bo->name, bo->real.map);
print_flags(flags);
if (!(flags & MAP_ASYNC)) {
bo_wait_with_stall_warning(dbg, bo, "memory mapping");
}
- return bo->map;
+ return bo->real.map;
}
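The p_atomic_cmpxchg above makes concurrent first maps safe without a lock:
each racing thread mmaps its own copy, exactly one pointer is published into
bo->real.map, and the losers unmap theirs. The pattern in isolation (Mesa's
p_atomic_cmpxchg returns the previous value; create_map is a hypothetical
helper):

   void *map = create_map(bo);
   if (p_atomic_cmpxchg(&bo->real.map, NULL, map) != NULL) {
      /* Another thread already published a mapping; discard ours. */
      os_munmap(map, bo->size);
   }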
/** Waits for all GPU rendering with the object to have completed. */
bo->bufmgr = bufmgr;
bo->name = "prime";
- bo->reusable = false;
- bo->imported = true;
- bo->mmap_mode = IRIS_MMAP_NONE;
- bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
+ bo->real.reusable = false;
+ bo->real.imported = true;
+ bo->real.mmap_mode = IRIS_MMAP_NONE;
+ bo->real.kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
/* From the Bspec, Memory Compression - Gfx12:
*
if (!iris_bo_is_external(bo))
_mesa_hash_table_insert(bo->bufmgr->handle_table, &bo->gem_handle, bo);
- if (!bo->exported) {
+ if (!bo->real.exported) {
/* If a BO is going to be used externally, it could be sent to the
* display HW. So make sure our CPU mappings don't assume cache
* coherency since display is outside that cache.
*/
- bo->exported = true;
- bo->reusable = false;
+ bo->real.exported = true;
+ bo->real.reusable = false;
}
}
{
struct iris_bufmgr *bufmgr = bo->bufmgr;
- if (bo->exported) {
- assert(!bo->reusable);
+ if (bo->real.exported) {
+ assert(!bo->real.reusable);
return;
}
{
struct iris_bufmgr *bufmgr = bo->bufmgr;
- if (!bo->global_name) {
+ if (!bo->real.global_name) {
struct drm_gem_flink flink = { .handle = bo->gem_handle };
if (intel_ioctl(bufmgr->fd, DRM_IOCTL_GEM_FLINK, &flink))
return -errno;
simple_mtx_lock(&bufmgr->lock);
- if (!bo->global_name) {
+ if (!bo->real.global_name) {
iris_bo_mark_exported_locked(bo);
- bo->global_name = flink.name;
- _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
+ bo->real.global_name = flink.name;
+ _mesa_hash_table_insert(bufmgr->name_table, &bo->real.global_name, bo);
}
simple_mtx_unlock(&bufmgr->lock);
}
- *name = bo->global_name;
+ *name = bo->real.global_name;
return 0;
}
}
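Note the re-check of bo->real.global_name after taking the lock: two threads
can race past the first check and both issue FLINK (harmless, since the
kernel hands back the same name for an already-named object), but only the
winner of the locked check marks the BO exported and inserts it into
name_table.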
bool found = false;
- list_for_each_entry(struct bo_export, iter, &bo->exports, link) {
+ list_for_each_entry(struct bo_export, iter, &bo->real.exports, link) {
if (iter->drm_fd != drm_fd)
continue;
/* Here we assume that for a given DRM fd, we'll always get back the
break;
}
if (!found)
- list_addtail(&export->link, &bo->exports);
+ list_addtail(&export->link, &bo->real.exports);
simple_mtx_unlock(&bufmgr->lock);
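Building a new entry for a foreign DRM fd presumably round-trips through
dma-buf earlier in this function, roughly (libdrm helpers; error handling
omitted):

   struct bo_export *export = calloc(1, sizeof(*export));
   int dmabuf_fd = -1;
   /* export from our device, then import into the target device */
   if (drmPrimeHandleToFD(bufmgr->fd, bo->gem_handle, DRM_CLOEXEC,
                          &dmabuf_fd) == 0) {
      drmPrimeFDToHandle(drm_fd, dmabuf_fd, &export->gem_handle);
      close(dmabuf_fd);
   }
   export->drm_fd = drm_fd;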
bo->name = "aux-map";
p_atomic_set(&bo->refcount, 1);
bo->index = -1;
- bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED |
- EXEC_OBJECT_CAPTURE;
- bo->mmap_mode = local ? IRIS_MMAP_WC : IRIS_MMAP_WB;
+ bo->real.kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED |
+ EXEC_OBJECT_CAPTURE;
+ bo->real.mmap_mode = local ? IRIS_MMAP_WC : IRIS_MMAP_WB;
buf->driver_bo = bo;
buf->gpu = bo->address;
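For context, the intel_aux_map buffer interface these assignments satisfy
looks roughly like the following (sketch from memory; see intel_aux_map.h):

   struct intel_buffer {
      uint64_t gpu;        /* GPU VA of the table memory */
      uint64_t gpu_end;
      void *map;           /* CPU mapping the aux-map code writes through */
      void *driver_bo;     /* opaque driver cookie: here, the iris_bo */
   };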