return ret;
}
+/*
+ * Backend hook for binding a BO into the GPU VM. Always reports success:
+ * on i915 there is nothing to do here (see comment below), but returning
+ * true lets common bufmgr code call gem_vm_bind() unconditionally.
+ */
+static bool
+i915_gem_vm_bind(struct iris_bo *bo)
+{
+ /*
+ * i915 does not support VM_BIND yet. The binding operation happens at
+ * submission when we supply BO handle & offset in the execbuffer list.
+ */
+ return true;
+}
+
+/*
+ * Backend hook for unbinding a BO from the GPU VM. Counterpart of
+ * i915_gem_vm_bind(): since bind is a no-op on i915, there is nothing to
+ * undo, so report success unconditionally and callers' error paths are
+ * never taken.
+ */
+static bool
+i915_gem_vm_unbind(struct iris_bo *bo)
+{
+ return true;
+}
+
+/*
+ * Returns the i915 kernel-mode-driver backend vtable. The table is a
+ * function-local static, so a single shared instance is handed out to
+ * every caller.
+ */
const struct iris_kmd_backend *i915_get_backend(void)
{
 static const struct iris_kmd_backend i915_backend = {
 .gem_mmap = i915_gem_mmap,
 .batch_check_for_reset = i915_batch_check_for_reset,
 .batch_submit = i915_batch_submit,
+ /* VM bind/unbind hooks added by this patch; no-ops on i915 (return true). */
+ .gem_vm_bind = i915_gem_vm_bind,
+ .gem_vm_unbind = i915_gem_vm_unbind,
 };
 return &i915_backend;
}
*/
if (memzone != iris_memzone_for_address(cur->address) ||
cur->address % alignment != 0) {
+ if (!bufmgr->kmd_backend->gem_vm_unbind(cur)) {
+ DBG("Unable to unbind vm of buf %u\n", cur->gem_handle);
+ bo_free(cur);
+ continue;
+ }
+
vma_free(bufmgr, cur->address, cur->size);
cur->address = 0ull;
}
if (bo->address == 0ull)
goto err_free;
+
+ if (!bufmgr->kmd_backend->gem_vm_bind(bo))
+ goto err_vm_alloc;
}
bo->name = name;
return bo;
+err_vm_alloc:
+ vma_free(bufmgr, bo->address, bo->size);
err_free:
simple_mtx_lock(&bufmgr->lock);
bo_free(bo);
if (bo->address == 0ull)
goto err_free;
+ if (!bufmgr->kmd_backend->gem_vm_bind(bo))
+ goto err_vm_alloc;
+
_mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
_mesa_hash_table_insert(bufmgr->name_table, &bo->real.global_name, bo);
simple_mtx_unlock(&bufmgr->lock);
return bo;
+err_vm_alloc:
+ vma_free(bufmgr, bo->address, bo->size);
err_free:
bo_free(bo);
simple_mtx_unlock(&bufmgr->lock);
assert(list_is_empty(&bo->real.exports));
}
+ /* Unbind and return the VMA for reuse */
+ if (bufmgr->kmd_backend->gem_vm_unbind(bo))
+ vma_free(bo->bufmgr, bo->address, bo->size);
+ else
+ DBG("Unable to unbind vm of buf %u\n", bo->gem_handle);
+
/* Close this object */
struct drm_gem_close close = { .handle = bo->gem_handle };
int ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
bo->size);
}
- /* Return the VMA for reuse */
- vma_free(bo->bufmgr, bo->address, bo->size);
-
for (int d = 0; d < bo->deps_size; d++) {
for (int b = 0; b < IRIS_BATCH_COUNT; b++) {
iris_syncobj_reference(bufmgr, &bo->deps[d].write_syncobjs[b], NULL);
if (bo->address == 0ull)
goto err_free;
+ if (!bufmgr->kmd_backend->gem_vm_bind(bo))
+ goto err_vm_alloc;
+
_mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
out:
simple_mtx_unlock(&bufmgr->lock);
return bo;
+err_vm_alloc:
+ vma_free(bufmgr, bo->address, bo->size);
err_free:
bo_free(bo);
simple_mtx_unlock(&bufmgr->lock);
if (bo->address == 0ull)
goto err_free;
+ if (!bufmgr->kmd_backend->gem_vm_bind(bo))
+ goto err_vm_alloc;
+
simple_mtx_unlock(&bufmgr->lock);
bo->name = "aux-map";
buf->map = iris_bo_map(NULL, bo, MAP_WRITE | MAP_RAW);
return buf;
+err_vm_alloc:
+ vma_free(bufmgr, bo->address, bo->size);
err_free:
free(buf);
bo_free(bo);