list_del(&cur->head);
- /* Tell the kernel we need this BO. If it still exists, we're done! */
- if (iris_bo_madvise(cur, IRIS_MADVICE_WILL_NEED)) {
- bo = cur;
- break;
+ /* Tell the kernel we need this BO and check if it still exists */
+ if (!iris_bo_madvise(cur, IRIS_MADVICE_WILL_NEED)) {
+ /* This BO was purged, throw it out and keep looking. */
+ bo_free(cur);
+ continue;
}
- /* This BO was purged, throw it out and keep looking. */
- bo_free(cur);
- }
-
- if (!bo)
- return NULL;
+ if (cur->aux_map_address) {
+ /* This buffer was associated with an aux-buffer range. We make sure
+ * that buffers are not reused from the cache while the buffer is (busy)
+ * being used by an executing batch. Since we are here, the buffer is no
+ * longer being used by a batch and the buffer was deleted (in order to
+ * end up in the cache). Therefore its old aux-buffer range can be
+ * removed from the aux-map.
+ */
+ if (cur->bufmgr->aux_map_ctx)
+ intel_aux_map_unmap_range(cur->bufmgr->aux_map_ctx, cur->address,
+ cur->size);
+ cur->aux_map_address = 0;
+ }
- if (bo->aux_map_address) {
- /* This buffer was associated with an aux-buffer range. We make sure
- * that buffers are not reused from the cache while the buffer is (busy)
- * being used by an executing batch. Since we are here, the buffer is no
- * longer being used by a batch and the buffer was deleted (in order to
- * end up in the cache). Therefore its old aux-buffer range can be
- * removed from the aux-map.
+ /* If the cached BO isn't in the right memory zone, or the alignment
+ * isn't sufficient, free the old memory and assign it a new address.
*/
- if (bo->bufmgr->aux_map_ctx)
- intel_aux_map_unmap_range(bo->bufmgr->aux_map_ctx, bo->address,
- bo->size);
- bo->aux_map_address = 0;
- }
+ if (memzone != iris_memzone_for_address(cur->address) ||
+ cur->address % alignment != 0) {
+ vma_free(bufmgr, cur->address, cur->size);
+ cur->address = 0ull;
+ }
- /* If the cached BO isn't in the right memory zone, or the alignment
- * isn't sufficient, free the old memory and assign it a new address.
- */
- if (memzone != iris_memzone_for_address(bo->address) ||
- bo->address % alignment != 0) {
- vma_free(bufmgr, bo->address, bo->size);
- bo->address = 0ull;
+ bo = cur;
+ break;
}
+ if (!bo)
+ return NULL;
+
/* Zero the contents if necessary. If this fails, fall back to
* allocating a fresh BO, which will always be zeroed by the kernel.
*/
if (INTEL_DEBUG(DEBUG_CAPTURE_ALL))
bo->real.kflags |= EXEC_OBJECT_CAPTURE;
bo->address = vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 1);
-
- if (bo->address == 0ull) {
- bo_free(bo);
- bo = NULL;
- goto out;
- }
+ if (bo->address == 0ull)
+ goto err_free;
_mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
_mesa_hash_table_insert(bufmgr->name_table, &bo->real.global_name, bo);
out:
simple_mtx_unlock(&bufmgr->lock);
return bo;
+
+err_free:
+ bo_free(bo);
+ simple_mtx_unlock(&bufmgr->lock);
+ return NULL;
}
static void
* to, because it's a fairly reasonable thing to do anyway.
*/
bo->address = vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 64 * 1024);
-
- if (bo->address == 0ull) {
- bo_free(bo);
- bo = NULL;
- goto out;
- }
+ if (bo->address == 0ull)
+ goto err_free;
_mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
out:
simple_mtx_unlock(&bufmgr->lock);
return bo;
+
+err_free:
+ bo_free(bo);
+ simple_mtx_unlock(&bufmgr->lock);
+ return NULL;
}
static void
simple_mtx_lock(&bufmgr->lock);
bo->address = vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 64 * 1024);
- if (bo->address == 0ull) {
- free(buf);
- bo_free(bo);
- simple_mtx_unlock(&bufmgr->lock);
- return NULL;
- }
+ if (bo->address == 0ull)
+ goto err_free;
simple_mtx_unlock(&bufmgr->lock);
buf->gpu_end = buf->gpu + bo->size;
buf->map = iris_bo_map(NULL, bo, MAP_WRITE | MAP_RAW);
return buf;
+
+err_free:
+ free(buf);
+ bo_free(bo);
+ simple_mtx_unlock(&bufmgr->lock);
+ return NULL;
}
static void