bo_dump_stats(device);
}
- vk_free(&device->alloc, bo);
+ vk_free(&device->vk.alloc, bo);
return ret == 0;
}
}
}
- bo = vk_alloc(&device->alloc, sizeof(struct v3dv_bo), 8,
+ bo = vk_alloc(&device->vk.alloc, sizeof(struct v3dv_bo), 8,
VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
if (!bo) {
goto retry;
}
- vk_free(&device->alloc, bo);
+ vk_free(&device->vk.alloc, bo);
fprintf(stderr, "Failed to allocate device memory for BO\n");
return NULL;
}
uint32_t size)
{
struct list_head *new_list =
- vk_alloc(&device->alloc, sizeof(struct list_head) * size, 8,
+ vk_alloc(&device->vk.alloc, sizeof(struct list_head) * size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
if (!new_list) {
cache->size_list = new_list;
cache->size_list_size = size;
- vk_free(&device->alloc, old_list);
+ vk_free(&device->vk.alloc, old_list);
return true;
}
v3dv_bo_cache_destroy(struct v3dv_device *device)
{
bo_cache_free_all(device, true);
- vk_free(&device->alloc, device->bo_cache.size_list);
+ vk_free(&device->vk.alloc, device->bo_cache.size_list);
if (dump_stats) {
fprintf(stderr, "BO stats after screen destroy:\n");
/* We only support one queue */
assert(pCreateInfo->queueFamilyIndex == 0);
- pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ pool = vk_object_zalloc(&device->vk, pAllocator, sizeof(*pool),
+ VK_OBJECT_TYPE_COMMAND_POOL);
if (pool == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (pAllocator)
pool->alloc = *pAllocator;
else
- pool->alloc = device->alloc;
+ pool->alloc = device->vk.alloc;
list_inithead(&pool->cmd_buffers);
struct v3dv_cmd_pool *pool,
VkCommandBufferLevel level)
{
- /* Do not reset the loader data header! If we are calling this from
- * a command buffer reset that would reset the loader's dispatch table for
- * the command buffer.
+ /* Do not reset the base object! If we are calling this from a command
+ * buffer reset, doing so would wipe the loader's dispatch table for the
+ * command buffer, along with any other relevant info in vk_object_base.
*/
- const uint32_t ld_size = sizeof(VK_LOADER_DATA);
- uint8_t *cmd_buffer_driver_start = ((uint8_t *) cmd_buffer) + ld_size;
- memset(cmd_buffer_driver_start, 0, sizeof(*cmd_buffer) - ld_size);
+ const uint32_t base_size = sizeof(struct vk_object_base);
+ uint8_t *cmd_buffer_driver_start = ((uint8_t *) cmd_buffer) + base_size;
+ memset(cmd_buffer_driver_start, 0, sizeof(*cmd_buffer) - base_size);
cmd_buffer->device = device;
cmd_buffer->pool = pool;
VkCommandBuffer *pCommandBuffer)
{
struct v3dv_cmd_buffer *cmd_buffer;
- cmd_buffer = vk_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ cmd_buffer = vk_object_zalloc(&device->vk,
+ &pool->alloc,
+ sizeof(*cmd_buffer),
+ VK_OBJECT_TYPE_COMMAND_BUFFER);
if (cmd_buffer == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
cmd_buffer_init(cmd_buffer, device, pool, level);
- cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
-
*pCommandBuffer = v3dv_cmd_buffer_to_handle(cmd_buffer);
return VK_SUCCESS;
list_for_each_entry_safe(struct v3dv_bo, bo, &job->bcl.bo_list, list_link) {
list_del(&bo->list_link);
- vk_free(&job->device->alloc, bo);
+ vk_free(&job->device->vk.alloc, bo);
}
list_for_each_entry_safe(struct v3dv_bo, bo, &job->rcl.bo_list, list_link) {
list_del(&bo->list_link);
- vk_free(&job->device->alloc, bo);
+ vk_free(&job->device->vk.alloc, bo);
}
list_for_each_entry_safe(struct v3dv_bo, bo, &job->indirect.bo_list, list_link) {
list_del(&bo->list_link);
- vk_free(&job->device->alloc, bo);
+ vk_free(&job->device->vk.alloc, bo);
}
}
{
assert(job->type == V3DV_JOB_TYPE_CPU_WAIT_EVENTS);
assert(job->cmd_buffer);
- vk_free(&job->cmd_buffer->device->alloc, job->cpu.event_wait.events);
+ vk_free(&job->cmd_buffer->device->vk.alloc, job->cpu.event_wait.events);
}
static void
job_destroy_cloned_gpu_cl_resources(job);
}
- vk_free(&job->device->alloc, job);
+ vk_free(&job->device->vk.alloc, job);
}
void
v3dv_cmd_buffer_private_obj_destroy_cb destroy_cb)
{
struct v3dv_cmd_buffer_private_obj *pobj =
- vk_alloc(&cmd_buffer->device->alloc, sizeof(*pobj), 8,
+ vk_alloc(&cmd_buffer->device->vk.alloc, sizeof(*pobj), 8,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (!pobj) {
v3dv_flag_oom(cmd_buffer, NULL);
assert(pobj && pobj->obj && pobj->destroy_cb);
pobj->destroy_cb(v3dv_device_to_handle(cmd_buffer->device),
pobj->obj,
- &cmd_buffer->device->alloc);
+ &cmd_buffer->device->vk.alloc);
list_del(&pobj->list_link);
- vk_free(&cmd_buffer->device->alloc, pobj);
+ vk_free(&cmd_buffer->device->vk.alloc, pobj);
}
static void
vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
if (cmd_buffer->state.query.end.alloc_count > 0)
- vk_free(&cmd_buffer->device->alloc, cmd_buffer->state.query.end.states);
+ vk_free(&cmd_buffer->device->vk.alloc, cmd_buffer->state.query.end.states);
if (cmd_buffer->push_constants_resource.bo)
v3dv_bo_free(cmd_buffer->device, cmd_buffer->push_constants_resource.bo);
if (cmd_buffer->state.meta.attachments) {
assert(cmd_buffer->state.meta.attachment_alloc_count > 0);
- vk_free(&cmd_buffer->device->alloc, cmd_buffer->state.meta.attachments);
+ vk_free(&cmd_buffer->device->vk.alloc, cmd_buffer->state.meta.attachments);
}
}
{
list_del(&cmd_buffer->pool_link);
cmd_buffer_free_resources(cmd_buffer);
- vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
+ vk_object_free(&cmd_buffer->device->vk, &cmd_buffer->pool->alloc, cmd_buffer);
}
void
struct v3dv_cmd_buffer *cmd_buffer,
uint32_t subpass_idx)
{
- struct v3dv_job *job = vk_zalloc(&device->alloc,
+ struct v3dv_job *job = vk_zalloc(&device->vk.alloc,
sizeof(struct v3dv_job), 8,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (!job) {
v3dv_cmd_buffer_finish_job(cmd_buffer);
assert(cmd_buffer->state.job == NULL);
- struct v3dv_job *job = vk_zalloc(&cmd_buffer->device->alloc,
+ struct v3dv_job *job = vk_zalloc(&cmd_buffer->device->vk.alloc,
sizeof(struct v3dv_job), 8,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
cmd_buffer_destroy(cmd_buffer);
}
- vk_free2(&device->alloc, pAllocator, pool);
+ vk_object_free(&device->vk, pAllocator, pool);
}
void
if (state->attachment_alloc_count < pass->attachment_count) {
if (state->attachments > 0) {
assert(state->attachment_alloc_count > 0);
- vk_free(&cmd_buffer->device->alloc, state->attachments);
+ vk_free(&cmd_buffer->device->vk.alloc, state->attachments);
}
uint32_t size = sizeof(struct v3dv_cmd_buffer_attachment_state) *
pass->attachment_count;
- state->attachments = vk_zalloc(&cmd_buffer->device->alloc, size, 8,
+ state->attachments = vk_zalloc(&cmd_buffer->device->vk.alloc, size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (!state->attachments) {
v3dv_flag_oom(cmd_buffer, NULL);
list_inithead(dst);
list_for_each_entry(struct v3dv_bo, bo, src, list_link) {
struct v3dv_bo *clone_bo =
- vk_alloc(&cmd_buffer->device->alloc, sizeof(struct v3dv_bo), 8,
+ vk_alloc(&cmd_buffer->device->vk.alloc, sizeof(struct v3dv_bo), 8,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (!clone_bo) {
v3dv_flag_oom(cmd_buffer, NULL);
job_clone_in_cmd_buffer(struct v3dv_job *job,
struct v3dv_cmd_buffer *cmd_buffer)
{
- struct v3dv_job *clone_job = vk_alloc(&job->device->alloc,
+ struct v3dv_job *clone_job = vk_alloc(&job->device->vk.alloc,
sizeof(struct v3dv_job), 8,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (!clone_job) {
attachment_state_item_size * state->attachment_alloc_count;
if (state->meta.attachment_alloc_count < state->attachment_alloc_count) {
if (state->meta.attachment_alloc_count > 0)
- vk_free(&cmd_buffer->device->alloc, state->meta.attachments);
+ vk_free(&cmd_buffer->device->vk.alloc, state->meta.attachments);
- state->meta.attachments = vk_zalloc(&cmd_buffer->device->alloc,
+ state->meta.attachments = vk_zalloc(&cmd_buffer->device->vk.alloc,
attachment_state_total_size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (!state->meta.attachments) {
struct v3dv_job *old_job = cmd_buffer->state.job;
cmd_buffer->state.job = NULL;
- struct v3dv_job *job = vk_zalloc(&cmd_buffer->device->alloc,
+ struct v3dv_job *job = vk_zalloc(&cmd_buffer->device->vk.alloc,
sizeof(struct v3dv_job), 8,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (!job) {
const uint32_t new_slot_count = MAX2(*alloc_count * 2, 4);
const uint32_t bytes = new_slot_count * slot_size;
- *ptr = vk_alloc(&cmd_buffer->device->alloc, bytes, 8,
+ *ptr = vk_alloc(&cmd_buffer->device->vk.alloc, bytes, 8,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (*ptr == NULL) {
fprintf(stderr, "Error: failed to allocate CPU buffer for query.\n");
struct drm_v3d_submit_tfu *tfu)
{
struct v3dv_device *device = cmd_buffer->device;
- struct v3dv_job *job = vk_zalloc(&device->alloc,
+ struct v3dv_job *job = vk_zalloc(&device->vk.alloc,
sizeof(struct v3dv_job), 8,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (!job) {
const uint32_t event_list_size = sizeof(struct v3dv_event *) * eventCount;
job->cpu.event_wait.events =
- vk_alloc(&cmd_buffer->device->alloc, event_list_size, 8,
+ vk_alloc(&cmd_buffer->device->vk.alloc, event_list_size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (!job->cpu.event_wait.events) {
v3dv_flag_oom(cmd_buffer, NULL);
struct v3dv_pipeline *pipeline = cmd_buffer->state.pipeline;
assert(pipeline && pipeline->cs && pipeline->cs->nir);
- struct v3dv_job *job = vk_zalloc(&cmd_buffer->device->alloc,
+ struct v3dv_job *job = vk_zalloc(&cmd_buffer->device->vk.alloc,
sizeof(struct v3dv_job), 8,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (!job) {
assert(pCreateInfo->sType ==
VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);
- layout = vk_alloc2(&device->alloc, pAllocator, sizeof(*layout), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ layout = vk_object_zalloc(&device->vk, pAllocator, sizeof(*layout),
+ VK_OBJECT_TYPE_PIPELINE_LAYOUT);
if (layout == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (!pipeline_layout)
return;
- vk_free2(&device->alloc, pAllocator, pipeline_layout);
+ vk_object_free(&device->vk, pAllocator, pipeline_layout);
}
VkResult
size += sizeof(struct v3dv_descriptor_pool_entry) * pCreateInfo->maxSets;
}
- pool = vk_alloc2(&device->alloc, pAllocator, size, 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ pool = vk_object_zalloc(&device->vk, pAllocator, size,
+ VK_OBJECT_TYPE_DESCRIPTOR_POOL);
if (!pool)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
- memset(pool, 0, sizeof(*pool));
-
if (!(pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT)) {
pool->host_memory_base = (uint8_t*)pool + sizeof(struct v3dv_descriptor_pool);
pool->host_memory_ptr = pool->host_memory_base;
return VK_SUCCESS;
out_of_device_memory:
- vk_free2(&device->alloc, pAllocator, pool);
+ vk_object_free(&device->vk, pAllocator, pool);
return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
}
}
}
}
- vk_free2(&device->alloc, NULL, set);
+ vk_object_free(&device->vk, NULL, set);
}
void
pool->bo = NULL;
}
- vk_free2(&device->alloc, pAllocator, pool);
+ vk_object_free(&device->vk, pAllocator, pool);
}
VkResult
for(int i = 0; i < pool->entry_count; ++i) {
descriptor_set_destroy(device, pool, pool->entries[i].set, false);
}
+ } else {
+ /* We clear the host memory so that when a new set is allocated from
+ * the pool its storage is already zeroed.
+ */
+ uint32_t host_size = pool->host_memory_end - pool->host_memory_base;
+ memset(pool->host_memory_base, 0, host_size);
}
pool->entry_count = 0;
const VkAllocationCallbacks *pAllocator)
{
VkDescriptorSetLayoutBinding *sorted_bindings =
- vk_alloc2(&device->alloc, pAllocator,
+ vk_alloc2(&device->vk.alloc, pAllocator,
count * sizeof(VkDescriptorSetLayoutBinding),
8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
uint32_t size = samplers_offset +
immutable_sampler_count * sizeof(struct v3dv_sampler);
- set_layout = vk_alloc2(&device->alloc, pAllocator, size, 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ set_layout = vk_object_zalloc(&device->vk, pAllocator, size,
+ VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT);
if (!set_layout)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
pCreateInfo->bindingCount,
device, pAllocator);
if (!bindings) {
- vk_free2(&device->alloc, pAllocator, set_layout);
+ vk_object_free(&device->vk, pAllocator, set_layout);
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
}
}
if (bindings)
- vk_free2(&device->alloc, pAllocator, bindings);
+ vk_free2(&device->vk.alloc, pAllocator, bindings);
set_layout->descriptor_count = descriptor_count;
set_layout->dynamic_offset_count = dynamic_offset_count;
if (!set_layout)
return;
- vk_free2(&device->alloc, pAllocator, set_layout);
+ vk_object_free(&device->vk, pAllocator, set_layout);
}
static VkResult
set = (struct v3dv_descriptor_set*)pool->host_memory_ptr;
pool->host_memory_ptr += mem_size;
+
+ vk_object_base_init(&device->vk, &set->base, VK_OBJECT_TYPE_DESCRIPTOR_SET);
} else {
- set = vk_alloc2(&device->alloc, NULL, mem_size, 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ set = vk_object_zalloc(&device->vk, NULL, mem_size,
+ VK_OBJECT_TYPE_DESCRIPTOR_SET);
if (!set)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
- memset(set, 0, mem_size);
set->pool = pool;
set->layout = layout;
if (layout->bo_size) {
if (!pool->host_memory_base && pool->entry_count == pool->max_entry_count) {
- vk_free2(&device->alloc, NULL, set);
+ vk_object_free(&device->vk, NULL, set);
return vk_error(device->instance, VK_ERROR_OUT_OF_POOL_MEMORY);
}
offset = pool->entries[index].offset + pool->entries[index].size;
}
if (pool->bo->size - offset < layout->bo_size) {
- vk_free2(&device->alloc, NULL, set);
+ vk_object_free(&device->vk, NULL, set);
return vk_error(device->instance, VK_ERROR_OUT_OF_POOL_MEMORY);
}
memmove(&pool->entries[index + 1], &pool->entries[index],
sizeof(pool->entries[0]) * (pool->entry_count - index));
} else {
assert(pool->host_memory_base);
- vk_free2(&device->alloc, NULL, set);
+ vk_object_free(&device->vk, NULL, set);
return vk_error(device->instance, VK_ERROR_OUT_OF_POOL_MEMORY);
}
if (!instance)
return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
- instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
+ vk_object_base_init(NULL, &instance->base, VK_OBJECT_TYPE_INSTANCE);
if (pAllocator)
instance->alloc = *pAllocator;
result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
if (result != VK_SUCCESS) {
+ vk_object_base_finish(&instance->base);
vk_free2(&default_alloc, pAllocator, instance);
return vk_error(NULL, result);
}
v3d_simulator_destroy(device->sim_file);
#endif
+ vk_object_base_finish(&device->base);
mtx_destroy(&device->mutex);
}
glsl_type_singleton_decref();
+ vk_object_base_finish(&instance->base);
vk_free(&instance->alloc, instance);
}
VkResult result = VK_SUCCESS;
int32_t master_fd = -1;
- device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
+ vk_object_base_init(NULL, &device->base, VK_OBJECT_TYPE_PHYSICAL_DEVICE);
device->instance = instance;
assert(drm_render_device);
static VkResult
queue_init(struct v3dv_device *device, struct v3dv_queue *queue)
{
- queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
+ vk_object_base_init(&device->vk, &queue->base, VK_OBJECT_TYPE_QUEUE);
queue->device = device;
queue->flags = 0;
queue->noop_job = NULL;
static void
queue_finish(struct v3dv_queue *queue)
{
+ vk_object_base_finish(&queue->base);
assert(list_is_empty(&queue->submit_wait_list));
if (queue->noop_job)
v3dv_job_destroy(queue->noop_job);
if (!device)
return vk_error(instance, VK_ERROR_OUT_OF_HOST_MEMORY);
- device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
+ vk_device_init(&device->vk, pCreateInfo,
+ &physical_device->instance->alloc, pAllocator);
+
device->instance = instance;
device->pdevice = physical_device;
if (pAllocator)
- device->alloc = *pAllocator;
+ device->vk.alloc = *pAllocator;
else
- device->alloc = physical_device->instance->alloc;
+ device->vk.alloc = physical_device->instance->alloc;
pthread_mutex_init(&device->mutex, NULL);
return VK_SUCCESS;
fail:
- vk_free(&device->alloc, device);
+ vk_free(&device->vk.alloc, device);
return result;
}
if (mem->has_bo_ownership)
v3dv_bo_free(device, mem->bo);
else if (mem->bo)
- vk_free(&device->alloc, mem->bo);
+ vk_free(&device->vk.alloc, mem->bo);
}
static void
{
VkResult result;
- *bo = vk_alloc2(&device->alloc, pAllocator, sizeof(struct v3dv_bo), 8,
+ *bo = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(struct v3dv_bo), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (*bo == NULL) {
result = VK_ERROR_OUT_OF_HOST_MEMORY;
fail:
if (*bo) {
- vk_free2(&device->alloc, pAllocator, *bo);
+ vk_free2(&device->vk.alloc, pAllocator, *bo);
*bo = NULL;
}
return result;
/* The Vulkan 1.0.33 spec says "allocationSize must be greater than 0". */
assert(pAllocateInfo->allocationSize > 0);
- mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ mem = vk_object_zalloc(&device->vk, pAllocator, sizeof(*mem),
+ VK_OBJECT_TYPE_DEVICE_MEMORY);
if (mem == NULL)
return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
}
if (result != VK_SUCCESS) {
- vk_free2(&device->alloc, pAllocator, mem);
+ vk_object_free(&device->vk, pAllocator, mem);
return vk_error(device->instance, result);
}
device_free(device, mem);
- vk_free2(&device->alloc, pAllocator, mem);
+ vk_object_free(&device->vk, pAllocator, mem);
}
VkResult
/* We don't support any flags for now */
assert(pCreateInfo->flags == 0);
- buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ buffer = vk_object_zalloc(&device->vk, pAllocator, sizeof(*buffer),
+ VK_OBJECT_TYPE_BUFFER);
if (buffer == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (!buffer)
return;
- vk_free2(&device->alloc, pAllocator, buffer);
+ vk_object_free(&device->vk, pAllocator, buffer);
}
/**
size_t size = sizeof(*framebuffer) +
sizeof(struct v3dv_image_view *) * pCreateInfo->attachmentCount;
- framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ framebuffer = vk_object_zalloc(&device->vk, pAllocator, size,
+ VK_OBJECT_TYPE_FRAMEBUFFER);
if (framebuffer == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (!fb)
return;
- vk_free2(&device->alloc, pAllocator, fb);
+ vk_object_free(&device->vk, pAllocator, fb);
}
VkResult
{
V3DV_FROM_HANDLE(v3dv_device, device, _device);
struct v3dv_event *event =
- vk_alloc2(&device->alloc, pAllocator, sizeof(*event), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ vk_object_zalloc(&device->vk, pAllocator, sizeof(*event),
+ VK_OBJECT_TYPE_EVENT);
if (!event)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (!event)
return;
- vk_free2(&device->alloc, pAllocator, event);
+ vk_object_free(&device->vk, pAllocator, event);
}
VkResult
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
- sampler = vk_zalloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ sampler = vk_object_zalloc(&device->vk, pAllocator, sizeof(*sampler),
+ VK_OBJECT_TYPE_SAMPLER);
if (!sampler)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (!sampler)
return;
- vk_free2(&device->alloc, pAllocator, sampler);
+ vk_object_free(&device->vk, pAllocator, sampler);
}
void
const struct v3dv_format *format = v3dv_get_format(pCreateInfo->format);
v3dv_assert(format != NULL && format->supported);
- image = vk_zalloc2(&device->alloc, pAllocator, sizeof(*image), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ image = vk_object_zalloc(&device->vk, pAllocator, sizeof(*image),
+ VK_OBJECT_TYPE_IMAGE);
if (!image)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
{
V3DV_FROM_HANDLE(v3dv_device, device, _device);
V3DV_FROM_HANDLE(v3dv_image, image, _image);
- vk_free2(&device->alloc, pAllocator, image);
+
+ if (image == NULL)
+ return;
+
+ vk_object_free(&device->vk, pAllocator, image);
}
VkImageViewType
V3DV_FROM_HANDLE(v3dv_image, image, pCreateInfo->image);
struct v3dv_image_view *iview;
- iview = vk_zalloc2(&device->alloc, pAllocator, sizeof(*iview), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ iview = vk_object_zalloc(&device->vk, pAllocator, sizeof(*iview),
+ VK_OBJECT_TYPE_IMAGE_VIEW);
if (iview == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
V3DV_FROM_HANDLE(v3dv_device, device, _device);
V3DV_FROM_HANDLE(v3dv_image_view, image_view, imageView);
- vk_free2(&device->alloc, pAllocator, image_view);
+ if (image_view == NULL)
+ return;
+
+ vk_object_free(&device->vk, pAllocator, image_view);
}
static void
v3dv_buffer_from_handle(pCreateInfo->buffer);
struct v3dv_buffer_view *view =
- vk_alloc2(&device->alloc, pAllocator, sizeof(*view), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ vk_object_zalloc(&device->vk, pAllocator, sizeof(*view),
+ VK_OBJECT_TYPE_BUFFER_VIEW);
if (!view)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
V3DV_FROM_HANDLE(v3dv_device, device, _device);
V3DV_FROM_HANDLE(v3dv_buffer_view, buffer_view, bufferView);
- vk_free2(&device->alloc, pAllocator, buffer_view);
+ if (buffer_view == NULL)
+ return;
+
+ vk_object_free(&device->vk, pAllocator, buffer_view);
}
};
return v3dv_CreatePipelineLayout(v3dv_device_to_handle(device),
- &info, &device->alloc, pipeline_layout);
+ &info, &device->vk.alloc, pipeline_layout);
}
static VkResult
};
return v3dv_CreatePipelineLayout(v3dv_device_to_handle(device),
- &info, &device->alloc, pipeline_layout);
+ &info, &device->vk.alloc, pipeline_layout);
}
void
hash_table_foreach(device->meta.color_clear.cache, entry) {
struct v3dv_meta_color_clear_pipeline *item = entry->data;
- destroy_color_clear_pipeline(_device, (uintptr_t)item, &device->alloc);
+ destroy_color_clear_pipeline(_device, (uintptr_t)item, &device->vk.alloc);
}
_mesa_hash_table_destroy(device->meta.color_clear.cache, NULL);
if (device->meta.color_clear.p_layout) {
v3dv_DestroyPipelineLayout(_device, device->meta.color_clear.p_layout,
- &device->alloc);
+ &device->vk.alloc);
}
hash_table_foreach(device->meta.depth_clear.cache, entry) {
struct v3dv_meta_depth_clear_pipeline *item = entry->data;
- destroy_depth_clear_pipeline(_device, item, &device->alloc);
+ destroy_depth_clear_pipeline(_device, item, &device->vk.alloc);
}
_mesa_hash_table_destroy(device->meta.depth_clear.cache, NULL);
if (device->meta.depth_clear.p_layout) {
v3dv_DestroyPipelineLayout(_device, device->meta.depth_clear.p_layout,
- &device->alloc);
+ &device->vk.alloc);
}
}
v3dv_CreateGraphicsPipelines(v3dv_device_to_handle(device),
VK_NULL_HANDLE,
1, &info,
- &device->alloc,
+ &device->vk.alloc,
pipeline);
ralloc_free(vs_nir);
};
return v3dv_CreateRenderPass(v3dv_device_to_handle(device),
- &info, &device->alloc, pass);
+ &info, &device->vk.alloc, pass);
}
static inline uint64_t
}
}
- *pipeline = vk_zalloc2(&device->alloc, NULL, sizeof(**pipeline), 8,
+ *pipeline = vk_zalloc2(&device->vk.alloc, NULL, sizeof(**pipeline), 8,
VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
if (*pipeline == NULL) {
VkDevice _device = v3dv_device_to_handle(device);
if (*pipeline) {
if ((*pipeline)->cached)
- v3dv_DestroyRenderPass(_device, (*pipeline)->pass, &device->alloc);
+ v3dv_DestroyRenderPass(_device, (*pipeline)->pass, &device->vk.alloc);
if ((*pipeline)->pipeline)
- v3dv_DestroyPipeline(_device, (*pipeline)->pipeline, &device->alloc);
- vk_free(&device->alloc, *pipeline);
+ v3dv_DestroyPipeline(_device, (*pipeline)->pipeline, &device->vk.alloc);
+ vk_free(&device->vk.alloc, *pipeline);
*pipeline = NULL;
}
return VK_SUCCESS;
}
- *pipeline = vk_zalloc2(&device->alloc, NULL, sizeof(**pipeline), 8,
+ *pipeline = vk_zalloc2(&device->vk.alloc, NULL, sizeof(**pipeline), 8,
VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
if (*pipeline == NULL) {
VkDevice _device = v3dv_device_to_handle(device);
if (*pipeline) {
if ((*pipeline)->pipeline)
- v3dv_DestroyPipeline(_device, (*pipeline)->pipeline, &device->alloc);
- vk_free(&device->alloc, *pipeline);
+ v3dv_DestroyPipeline(_device, (*pipeline)->pipeline, &device->vk.alloc);
+ vk_free(&device->vk.alloc, *pipeline);
*pipeline = NULL;
}
VkImageView fb_attachment;
result = v3dv_CreateImageView(v3dv_device_to_handle(device),
&fb_layer_view_info,
- &device->alloc, &fb_attachment);
+ &device->vk.alloc, &fb_attachment);
if (result != VK_SUCCESS)
goto fail;
VkFramebuffer fb;
result = v3dv_CreateFramebuffer(device_handle, &fb_info,
- &cmd_buffer->device->alloc, &fb);
+ &cmd_buffer->device->vk.alloc, &fb);
if (result != VK_SUCCESS)
goto fail;
v3dv_return_if_oom(cmd_buffer, NULL);
job->cpu.clear_attachments.rects =
- vk_alloc(&cmd_buffer->device->alloc,
+ vk_alloc(&cmd_buffer->device->vk.alloc,
sizeof(VkClearRect) * rectCount, 8,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (!job->cpu.clear_attachments.rects) {
result =
v3dv_CreateDescriptorSetLayout(v3dv_device_to_handle(device),
&descriptor_set_layout_info,
- &device->alloc,
+ &device->vk.alloc,
descriptor_set_layout);
if (result != VK_SUCCESS)
return false;
result =
v3dv_CreatePipelineLayout(v3dv_device_to_handle(device),
&pipeline_layout_info,
- &device->alloc,
+ &device->vk.alloc,
pipeline_layout);
return result == VK_SUCCESS;
}
for (uint32_t i = 0; i < 3; i++) {
hash_table_foreach(device->meta.blit.cache[i], entry) {
struct v3dv_meta_blit_pipeline *item = entry->data;
- v3dv_DestroyPipeline(_device, item->pipeline, &device->alloc);
- v3dv_DestroyRenderPass(_device, item->pass, &device->alloc);
- v3dv_DestroyRenderPass(_device, item->pass_no_load, &device->alloc);
- vk_free(&device->alloc, item);
+ v3dv_DestroyPipeline(_device, item->pipeline, &device->vk.alloc);
+ v3dv_DestroyRenderPass(_device, item->pass, &device->vk.alloc);
+ v3dv_DestroyRenderPass(_device, item->pass_no_load, &device->vk.alloc);
+ vk_free(&device->vk.alloc, item);
}
_mesa_hash_table_destroy(device->meta.blit.cache[i], NULL);
}
if (device->meta.blit.p_layout) {
v3dv_DestroyPipelineLayout(_device, device->meta.blit.p_layout,
- &device->alloc);
+ &device->vk.alloc);
}
if (device->meta.blit.ds_layout) {
v3dv_DestroyDescriptorSetLayout(_device, device->meta.blit.ds_layout,
- &device->alloc);
+ &device->vk.alloc);
}
}
result =
v3dv_CreateDescriptorSetLayout(v3dv_device_to_handle(device),
&ds_layout_info,
- &device->alloc,
+ &device->vk.alloc,
ds_layout);
if (result != VK_SUCCESS)
return false;
result =
v3dv_CreatePipelineLayout(v3dv_device_to_handle(device),
&p_layout_info,
- &device->alloc,
+ &device->vk.alloc,
p_layout);
return result == VK_SUCCESS;
}
for (uint32_t i = 0; i < 3; i++) {
hash_table_foreach(device->meta.texel_buffer_copy.cache[i], entry) {
struct v3dv_meta_texel_buffer_copy_pipeline *item = entry->data;
- v3dv_DestroyPipeline(_device, item->pipeline, &device->alloc);
- v3dv_DestroyRenderPass(_device, item->pass, &device->alloc);
- v3dv_DestroyRenderPass(_device, item->pass_no_load, &device->alloc);
- vk_free(&device->alloc, item);
+ v3dv_DestroyPipeline(_device, item->pipeline, &device->vk.alloc);
+ v3dv_DestroyRenderPass(_device, item->pass, &device->vk.alloc);
+ v3dv_DestroyRenderPass(_device, item->pass_no_load, &device->vk.alloc);
+ vk_free(&device->vk.alloc, item);
}
_mesa_hash_table_destroy(device->meta.texel_buffer_copy.cache[i], NULL);
}
if (device->meta.texel_buffer_copy.p_layout) {
v3dv_DestroyPipelineLayout(_device, device->meta.texel_buffer_copy.p_layout,
- &device->alloc);
+ &device->vk.alloc);
}
if (device->meta.texel_buffer_copy.ds_layout) {
v3dv_DestroyDescriptorSetLayout(_device, device->meta.texel_buffer_copy.ds_layout,
- &device->alloc);
+ &device->vk.alloc);
}
}
.queueFamilyIndexCount = 0,
.initialLayout = VK_IMAGE_LAYOUT_GENERAL,
};
- result = v3dv_CreateImage(_device, &uiview_info, &device->alloc, &uiview);
+ result = v3dv_CreateImage(_device, &uiview_info, &device->vk.alloc, &uiview);
if (result != VK_SUCCESS)
return handled;
VkImage buffer_image;
result =
- v3dv_CreateImage(_device, &image_info, &device->alloc, &buffer_image);
+ v3dv_CreateImage(_device, &image_info, &device->vk.alloc, &buffer_image);
if (result != VK_SUCCESS)
return handled;
VkImage _image;
VkResult result =
- v3dv_CreateImage(_device, &info, &cmd_buffer->device->alloc, &_image);
+ v3dv_CreateImage(_device, &info, &cmd_buffer->device->vk.alloc, &_image);
if (result != VK_SUCCESS) {
v3dv_flag_oom(cmd_buffer, NULL);
return NULL;
VkResult result =
v3dv_CreateDescriptorPool(v3dv_device_to_handle(cmd_buffer->device),
&info,
- &cmd_buffer->device->alloc,
+ &cmd_buffer->device->vk.alloc,
&cmd_buffer->meta.texel_buffer_copy.dspool);
if (result == VK_SUCCESS) {
return true;
}
- *pipeline = vk_zalloc2(&device->alloc, NULL, sizeof(**pipeline), 8,
+ *pipeline = vk_zalloc2(&device->vk.alloc, NULL, sizeof(**pipeline), 8,
VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
if (*pipeline == NULL)
VkDevice _device = v3dv_device_to_handle(device);
if (*pipeline) {
if ((*pipeline)->pass)
- v3dv_DestroyRenderPass(_device, (*pipeline)->pass, &device->alloc);
+ v3dv_DestroyRenderPass(_device, (*pipeline)->pass, &device->vk.alloc);
if ((*pipeline)->pipeline)
- v3dv_DestroyPipeline(_device, (*pipeline)->pipeline, &device->alloc);
- vk_free(&device->alloc, *pipeline);
+ v3dv_DestroyPipeline(_device, (*pipeline)->pipeline, &device->vk.alloc);
+ vk_free(&device->vk.alloc, *pipeline);
*pipeline = NULL;
}
VkBufferView texel_buffer_view;
result = v3dv_CreateBufferView(_device, &buffer_view_info,
- &cmd_buffer->device->alloc,
+ &cmd_buffer->device->vk.alloc,
&texel_buffer_view);
if (result != VK_SUCCESS)
return handled;
};
VkImageView image_view;
result = v3dv_CreateImageView(_device, &image_view_info,
- &cmd_buffer->device->alloc, &image_view);
+ &cmd_buffer->device->vk.alloc, &image_view);
if (result != VK_SUCCESS)
goto fail;
VkFramebuffer fb;
result = v3dv_CreateFramebuffer(_device, &fb_info,
- &cmd_buffer->device->alloc, &fb);
+ &cmd_buffer->device->vk.alloc, &fb);
if (result != VK_SUCCESS)
goto fail;
VkImage buffer_image;
VkResult result =
- v3dv_CreateImage(_device, &image_info, &device->alloc, &buffer_image);
+ v3dv_CreateImage(_device, &image_info, &device->vk.alloc, &buffer_image);
if (result != VK_SUCCESS)
return handled;
.allocationSize = reqs.size,
.memoryTypeIndex = 0,
};
- result = v3dv_AllocateMemory(_device, &alloc_info, &device->alloc, &mem);
+ result = v3dv_AllocateMemory(_device, &alloc_info, &device->vk.alloc, &mem);
if (result != VK_SUCCESS)
return handled;
VkResult result;
att.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
result = v3dv_CreateRenderPass(v3dv_device_to_handle(device),
- &info, &device->alloc, pass_load);
+ &info, &device->vk.alloc, pass_load);
if (result != VK_SUCCESS)
return false;
att.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
result = v3dv_CreateRenderPass(v3dv_device_to_handle(device),
- &info, &device->alloc, pass_no_load);
+ &info, &device->vk.alloc, pass_no_load);
return result == VK_SUCCESS;
}
v3dv_CreateGraphicsPipelines(v3dv_device_to_handle(device),
VK_NULL_HANDLE,
1, &info,
- &device->alloc,
+ &device->vk.alloc,
pipeline);
ralloc_free(vs_nir);
return true;
}
- *pipeline = vk_zalloc2(&device->alloc, NULL, sizeof(**pipeline), 8,
+ *pipeline = vk_zalloc2(&device->vk.alloc, NULL, sizeof(**pipeline), 8,
VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
if (*pipeline == NULL)
VkDevice _device = v3dv_device_to_handle(device);
if (*pipeline) {
if ((*pipeline)->pass)
- v3dv_DestroyRenderPass(_device, (*pipeline)->pass, &device->alloc);
+ v3dv_DestroyRenderPass(_device, (*pipeline)->pass, &device->vk.alloc);
if ((*pipeline)->pass_no_load)
- v3dv_DestroyRenderPass(_device, (*pipeline)->pass_no_load, &device->alloc);
+ v3dv_DestroyRenderPass(_device, (*pipeline)->pass_no_load, &device->vk.alloc);
if ((*pipeline)->pipeline)
- v3dv_DestroyPipeline(_device, (*pipeline)->pipeline, &device->alloc);
- vk_free(&device->alloc, *pipeline);
+ v3dv_DestroyPipeline(_device, (*pipeline)->pipeline, &device->vk.alloc);
+ vk_free(&device->vk.alloc, *pipeline);
*pipeline = NULL;
}
VkResult result =
v3dv_CreateDescriptorPool(v3dv_device_to_handle(cmd_buffer->device),
&info,
- &cmd_buffer->device->alloc,
+ &cmd_buffer->device->vk.alloc,
&cmd_buffer->meta.blit.dspool);
if (result == VK_SUCCESS) {
.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST,
};
VkSampler sampler;
- result = v3dv_CreateSampler(_device, &sampler_info, &device->alloc,
+ result = v3dv_CreateSampler(_device, &sampler_info, &device->vk.alloc,
&sampler);
if (result != VK_SUCCESS)
goto fail;
};
VkImageView dst_image_view;
result = v3dv_CreateImageView(_device, &dst_image_view_info,
- &device->alloc, &dst_image_view);
+ &device->vk.alloc, &dst_image_view);
if (result != VK_SUCCESS)
goto fail;
VkFramebuffer fb;
result = v3dv_CreateFramebuffer(_device, &fb_info,
- &cmd_buffer->device->alloc, &fb);
+ &cmd_buffer->device->vk.alloc, &fb);
if (result != VK_SUCCESS)
goto fail;
};
VkImageView src_image_view;
result = v3dv_CreateImageView(_device, &src_image_view_info,
- &device->alloc, &src_image_view);
+ &device->vk.alloc, &src_image_view);
if (result != VK_SUCCESS)
goto fail;
size_t attachments_offset = size;
size += pCreateInfo->attachmentCount * sizeof(pass->attachments[0]);
- pass = vk_alloc2(&device->alloc, pAllocator, size, 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ pass = vk_object_zalloc(&device->vk, pAllocator, size,
+ VK_OBJECT_TYPE_RENDER_PASS);
if (pass == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
- memset(pass, 0, size);
pass->attachment_count = pCreateInfo->attachmentCount;
pass->attachments = (void *) pass + attachments_offset;
pass->subpass_count = pCreateInfo->subpassCount;
const size_t subpass_attachment_bytes =
subpass_attachment_count * sizeof(struct v3dv_subpass_attachment);
pass->subpass_attachments =
- vk_alloc2(&device->alloc, pAllocator, subpass_attachment_bytes, 8,
+ vk_alloc2(&device->vk.alloc, pAllocator, subpass_attachment_bytes, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pass->subpass_attachments == NULL) {
- vk_free2(&device->alloc, pAllocator, pass);
+ vk_object_free(&device->vk, pAllocator, pass);
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
} else {
if (!_pass)
return;
- vk_free2(&device->alloc, pAllocator, pass->subpass_attachments);
- vk_free2(&device->alloc, pAllocator, pass);
+ vk_free2(&device->vk.alloc, pAllocator, pass->subpass_attachments);
+ vk_object_free(&device->vk, pAllocator, pass);
}
static void
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
assert(pCreateInfo->flags == 0);
- module = vk_alloc2(&device->alloc, pAllocator,
- sizeof(*module) + pCreateInfo->codeSize, 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ module = vk_object_zalloc(&device->vk, pAllocator,
+ sizeof(*module) + pCreateInfo->codeSize,
+ VK_OBJECT_TYPE_SHADER_MODULE);
if (module == NULL)
return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
*/
assert(module->nir == NULL);
- vk_free2(&device->alloc, pAllocator, module);
+ vk_object_free(&device->vk, pAllocator, module);
}
void
if (variant->assembly_bo)
v3dv_bo_free(device, variant->assembly_bo);
ralloc_free(variant->prog_data.base);
- vk_free(&device->alloc, variant);
+ vk_free(&device->vk.alloc, variant);
}
static void
ralloc_free(p_stage->nir);
if (p_stage->current_variant)
v3dv_shader_variant_unref(device, p_stage->current_variant);
- vk_free2(&device->alloc, pAllocator, p_stage);
+ vk_free2(&device->vk.alloc, pAllocator, p_stage);
}
static void
if (pipeline->default_attribute_values)
v3dv_bo_free(device, pipeline->default_attribute_values);
- vk_free2(&device->alloc, pAllocator, pipeline);
+ vk_object_free(&device->vk, pAllocator, pipeline);
}
void
struct v3dv_device *device = src->pipeline->device;
struct v3dv_pipeline_stage *p_stage =
- vk_zalloc2(&device->alloc, pAllocator, sizeof(*p_stage), 8,
+ vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*p_stage), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (p_stage == NULL)
VkResult *out_vk_result)
{
struct v3dv_shader_variant *variant =
- vk_zalloc(&device->alloc, sizeof(*variant), 8,
+ vk_zalloc(&device->vk.alloc, sizeof(*variant), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (variant == NULL) {
if (!upload_assembly(device, variant, stage, is_coord,
qpu_insts, qpu_insts_size)) {
ralloc_free(variant->prog_data.base);
- vk_free(&device->alloc, variant);
+ vk_free(&device->vk.alloc, variant);
*out_vk_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
return NULL;
gl_shader_stage stage = vk_to_mesa_shader_stage(sinfo->stage);
struct v3dv_pipeline_stage *p_stage =
- vk_zalloc2(&device->alloc, pAllocator, sizeof(*p_stage), 8,
+ vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*p_stage), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (p_stage == NULL)
"noop_fs");
struct v3dv_pipeline_stage *p_stage =
- vk_zalloc2(&device->alloc, pAllocator, sizeof(*p_stage), 8,
+ vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*p_stage), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (p_stage == NULL)
if (cache == NULL && device->instance->default_pipeline_cache_enabled)
cache = &device->default_pipeline_cache;
- pipeline = vk_zalloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ pipeline = vk_object_zalloc(&device->vk, pAllocator, sizeof(*pipeline),
+ VK_OBJECT_TYPE_PIPELINE);
+
if (pipeline == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
gl_shader_stage stage = vk_to_mesa_shader_stage(sinfo->stage);
struct v3dv_pipeline_stage *p_stage =
- vk_zalloc2(&device->alloc, alloc, sizeof(*p_stage), 8,
+ vk_zalloc2(&device->vk.alloc, alloc, sizeof(*p_stage), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!p_stage)
return VK_ERROR_OUT_OF_HOST_MEMORY;
if (cache == NULL && device->instance->default_pipeline_cache_enabled)
cache = &device->default_pipeline_cache;
- pipeline = vk_zalloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ pipeline = vk_object_zalloc(&device->vk, pAllocator, sizeof(*pipeline),
+ VK_OBJECT_TYPE_PIPELINE);
if (pipeline == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
struct v3dv_device *device,
bool cache_enabled)
{
- cache->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
-
cache->device = device;
pthread_mutex_init(&cache->mutex, NULL);
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);
assert(pCreateInfo->flags == 0);
- cache = vk_alloc2(&device->alloc, pAllocator,
- sizeof(*cache), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ cache = vk_object_zalloc(&device->vk, pAllocator,
+ sizeof(*cache),
+ VK_OBJECT_TYPE_PIPELINE_CACHE);
if (cache == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
v3dv_pipeline_cache_finish(cache);
- vk_free2(&device->alloc, pAllocator, cache);
+ vk_object_free(&device->vk, pAllocator, cache);
}
VkResult
#include <vulkan/vk_icd.h>
#include <vk_enum_to_str.h>
+#include "vk_object.h"
+
#include <xf86drm.h>
#ifdef HAVE_VALGRIND
struct v3d_simulator_file;
struct v3dv_physical_device {
- VK_LOADER_DATA _loader_data;
+ struct vk_object_base base;
struct v3dv_instance *instance;
};
struct v3dv_instance {
- VK_LOADER_DATA _loader_data;
+ struct vk_object_base base;
VkAllocationCallbacks alloc;
/* Tracks wait threads spawned from a single vkQueueSubmit call */
struct v3dv_queue_submit_wait_info {
+ /* TODO: should this embed a struct vk_object_base base? */
struct list_head list_link;
struct v3dv_device *device;
};
struct v3dv_queue {
- VK_LOADER_DATA _loader_data;
+ struct vk_object_base base;
struct v3dv_device *device;
VkDeviceQueueCreateFlags flags;
};
struct v3dv_pipeline_cache {
- VK_LOADER_DATA _loader_data;
+ struct vk_object_base base;
struct v3dv_device *device;
mtx_t mutex;
};
struct v3dv_device {
- VK_LOADER_DATA _loader_data;
-
- VkAllocationCallbacks alloc;
+ struct vk_device vk;
struct v3dv_instance *instance;
struct v3dv_physical_device *pdevice;
};
struct v3dv_device_memory {
+ struct vk_object_base base;
+
struct v3dv_bo *bo;
const VkMemoryType *type;
bool has_bo_ownership;
};
struct v3dv_image {
+ struct vk_object_base base;
+
VkImageType type;
VkImageAspectFlags aspects;
VkImageViewType v3dv_image_type_to_view_type(VkImageType type);
struct v3dv_image_view {
+ struct vk_object_base base;
+
const struct v3dv_image *image;
VkImageAspectFlags aspects;
VkExtent3D extent;
uint32_t v3dv_layer_offset(const struct v3dv_image *image, uint32_t level, uint32_t layer);
struct v3dv_buffer {
+ struct vk_object_base base;
+
VkDeviceSize size;
VkBufferUsageFlags usage;
uint32_t alignment;
};
struct v3dv_buffer_view {
+ struct vk_object_base base;
+
const struct v3dv_buffer *buffer;
VkFormat vk_format;
};
struct v3dv_render_pass {
+ struct vk_object_base base;
+
uint32_t attachment_count;
struct v3dv_render_pass_attachment *attachments;
};
struct v3dv_framebuffer {
+ struct vk_object_base base;
+
uint32_t width;
uint32_t height;
uint32_t layers;
struct v3dv_render_pass *pass,
uint32_t subpass_idx);
struct v3dv_cmd_pool {
+ struct vk_object_base base;
+
VkAllocationCallbacks alloc;
struct list_head cmd_buffers;
};
};
struct v3dv_query_pool {
+ struct vk_object_base base;
+
VkQueryType query_type;
uint32_t query_count;
struct v3dv_query *queries;
};
struct v3dv_cmd_buffer {
- VK_LOADER_DATA _loader_data;
+ struct vk_object_base base;
struct v3dv_device *device;
v3dv_cmd_buffer_private_obj_destroy_cb destroy_cb);
struct v3dv_semaphore {
+ struct vk_object_base base;
+
/* A syncobject handle associated with this semaphore */
uint32_t sync;
};
struct v3dv_fence {
+ struct vk_object_base base;
+
/* A syncobject handle associated with this fence */
uint32_t sync;
};
struct v3dv_event {
+ struct vk_object_base base;
int state;
};
struct v3dv_shader_module {
+ struct vk_object_base base;
+
/* A NIR shader. We create NIR modules for shaders that are generated
* internally by the driver.
*/
};
struct v3dv_descriptor_pool {
+ struct vk_object_base base;
+
struct v3dv_bo *bo;
/* Current offset at the descriptor bo. 0 means that we didn't use it for
* any descriptor. If the descriptor bo is NULL, current offset is
};
struct v3dv_descriptor_set {
+ struct vk_object_base base;
+
struct v3dv_descriptor_pool *pool;
const struct v3dv_descriptor_set_layout *layout;
};
struct v3dv_descriptor_set_layout {
+ struct vk_object_base base;
+
VkDescriptorSetLayoutCreateFlags flags;
/* Number of bindings in this descriptor set */
};
struct v3dv_pipeline_layout {
+ struct vk_object_base base;
+
struct {
struct v3dv_descriptor_set_layout *layout;
uint32_t dynamic_offset_start;
};
struct v3dv_sampler {
+ struct vk_object_base base;
+
bool compare_enable;
bool unnormalized_coordinates;
bool clamp_to_transparent_black_border;
}
struct v3dv_pipeline {
+ struct vk_object_base base;
+
struct v3dv_device *device;
VkShaderStageFlags active_stages;
* for occlussion queries so we should try to use that.
*/
struct v3dv_query_pool *pool =
- vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ vk_object_zalloc(&device->vk, pAllocator, sizeof(*pool),
+ VK_OBJECT_TYPE_QUERY_POOL);
if (pool == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
VkResult result;
const uint32_t pool_bytes = sizeof(struct v3dv_query) * pool->query_count;
- pool->queries = vk_alloc2(&device->alloc, pAllocator, pool_bytes, 8,
+ pool->queries = vk_alloc2(&device->vk.alloc, pAllocator, pool_bytes, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pool->queries == NULL) {
result = vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
fail_alloc_bo:
for (uint32_t j = 0; j < i; j++)
v3dv_bo_free(device, pool->queries[j].bo);
- vk_free2(&device->alloc, pAllocator, pool->queries);
+ vk_free2(&device->vk.alloc, pAllocator, pool->queries);
fail_alloc_bo_list:
- vk_free2(&device->alloc, pAllocator, pool);
+ vk_object_free(&device->vk, pAllocator, pool);
return result;
}
v3dv_bo_free(device, pool->queries[i].bo);
}
- vk_free2(&device->alloc, pAllocator, pool->queries);
- vk_free2(&device->alloc, pAllocator, pool);
+ vk_free2(&device->vk.alloc, pAllocator, pool->queries);
+ vk_object_free(&device->vk, pAllocator, pool);
}
static void
queue_create_noop_job(struct v3dv_queue *queue)
{
struct v3dv_device *device = queue->device;
- queue->noop_job = vk_zalloc(&device->alloc, sizeof(struct v3dv_job), 8,
+ queue->noop_job = vk_zalloc(&device->vk.alloc, sizeof(struct v3dv_job), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!queue->noop_job)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
*/
if (*wait_info == NULL) {
*wait_info =
- vk_zalloc(&device->alloc, sizeof(struct v3dv_queue_submit_wait_info), 8,
+ vk_zalloc(&device->vk.alloc, sizeof(struct v3dv_queue_submit_wait_info), 8,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
(*wait_info)->device = device;
}
pSubmit->signalSemaphoreCount * sizeof(VkSemaphore);
wait_info->signal_semaphore_count += pSubmit->signalSemaphoreCount;
wait_info->signal_semaphores =
- vk_alloc(&device->alloc, prev_alloc_size + extra_alloc_size, 8,
+ vk_alloc(&device->vk.alloc, prev_alloc_size + extra_alloc_size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
/* Copy the old list to the new allocation and free the old list */
if (prev_count > 0) {
memcpy(wait_info->signal_semaphores, prev_list, prev_alloc_size);
- vk_free(&device->alloc, prev_list);
+ vk_free(&device->vk.alloc, prev_list);
}
/* Add the new semaphores to the list */
list_del(&wait_info->list_link);
mtx_unlock(&queue->mutex);
- vk_free(&wait_info->device->alloc, wait_info->signal_semaphores);
- vk_free(&wait_info->device->alloc, wait_info);
+ vk_free(&wait_info->device->vk.alloc, wait_info->signal_semaphores);
+ vk_free(&wait_info->device->vk.alloc, wait_info);
return NULL;
}
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO);
struct v3dv_semaphore *sem =
- vk_alloc2(&device->alloc, pAllocator, sizeof(struct v3dv_semaphore), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ vk_object_zalloc(&device->vk, pAllocator, sizeof(struct v3dv_semaphore),
+ VK_OBJECT_TYPE_SEMAPHORE);
if (sem == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
int ret = drmSyncobjCreate(device->pdevice->render_fd, 0, &sem->sync);
if (ret) {
- vk_free2(&device->alloc, pAllocator, sem);
+ vk_object_free(&device->vk, pAllocator, sem);
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
if (sem->fd != -1)
close(sem->fd);
- vk_free2(&device->alloc, pAllocator, sem);
+ vk_object_free(&device->vk, pAllocator, sem);
}
VkResult
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);
struct v3dv_fence *fence =
- vk_alloc2(&device->alloc, pAllocator, sizeof(struct v3dv_fence), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ vk_object_zalloc(&device->vk, pAllocator, sizeof(struct v3dv_fence),
+ VK_OBJECT_TYPE_FENCE);
if (fence == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
flags |= DRM_SYNCOBJ_CREATE_SIGNALED;
int ret = drmSyncobjCreate(device->pdevice->render_fd, flags, &fence->sync);
if (ret) {
- vk_free2(&device->alloc, pAllocator, fence);
+ vk_object_free(&device->vk, pAllocator, fence);
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
if (fence->fd != -1)
close(fence->fd);
- vk_free2(&device->alloc, pAllocator, fence);
+ vk_object_free(&device->vk, pAllocator, fence);
}
VkResult
{
V3DV_FROM_HANDLE(v3dv_device, device, _device);
- uint32_t *syncobjs = vk_alloc(&device->alloc,
+ uint32_t *syncobjs = vk_alloc(&device->vk.alloc,
sizeof(*syncobjs) * fenceCount, 8,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (!syncobjs)
int ret = drmSyncobjReset(device->pdevice->render_fd, syncobjs, fenceCount);
- vk_free(&device->alloc, syncobjs);
+ vk_free(&device->vk.alloc, syncobjs);
if (ret)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
const uint64_t abs_timeout = get_absolute_timeout(timeout);
- uint32_t *syncobjs = vk_alloc(&device->alloc,
+ uint32_t *syncobjs = vk_alloc(&device->vk.alloc,
sizeof(*syncobjs) * fenceCount, 8,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (!syncobjs)
timeout, flags, NULL);
} while (ret == -ETIME && gettime_ns() < abs_timeout);
- vk_free(&device->alloc, syncobjs);
+ vk_free(&device->vk.alloc, syncobjs);
if (ret == -ETIME)
return VK_TIMEOUT;
if (pAllocator)
alloc = pAllocator;
else
- alloc = &device->alloc;
+ alloc = &device->vk.alloc;
return wsi_common_create_swapchain(wsi_device, _device,
pCreateInfo, alloc, pSwapchain);
if (pAllocator)
alloc = pAllocator;
else
- alloc = &device->alloc;
+ alloc = &device->vk.alloc;
wsi_common_destroy_swapchain(_device, swapchain, alloc);
}