return result;
struct radeon_bo_metadata md;
- device->ws->buffer_get_metadata(radv_device_memory_from_handle(memory_h)->bo, &md);
+ device->ws->buffer_get_metadata(device->ws, radv_device_memory_from_handle(memory_h)->bo, &md);
VkImageCreateInfo updated_base_info = *base_info;
if (mem->image) {
struct radeon_bo_metadata metadata;
- device->ws->buffer_get_metadata(mem->bo, &metadata);
+ device->ws->buffer_get_metadata(device->ws, mem->bo, &metadata);
struct radv_image_create_info create_info = {
.no_metadata_planes = true,
VkResult result = radv_image_create_layout(device, create_info, NULL, mem->image);
if (result != VK_SUCCESS) {
- device->ws->buffer_destroy(mem->bo);
+ device->ws->buffer_destroy(device->ws, mem->bo);
mem->bo = NULL;
return result;
}
if (alloc_size < mem->image->size) {
- device->ws->buffer_destroy(mem->bo);
+ device->ws->buffer_destroy(device->ws, mem->bo);
mem->bo = NULL;
return VK_ERROR_INVALID_EXTERNAL_HANDLE;
}
} else if (mem->buffer) {
if (alloc_size < mem->buffer->size) {
- device->ws->buffer_destroy(mem->bo);
+ device->ws->buffer_destroy(device->ws, mem->bo);
mem->bo = NULL;
return VK_ERROR_INVALID_EXTERNAL_HANDLE;
}
list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
&cmd_buffer->upload.list, list) {
- cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
+ cmd_buffer->device->ws->buffer_destroy(cmd_buffer->device->ws, up->upload_bo);
list_del(&up->list);
free(up);
}
if (cmd_buffer->upload.upload_bo)
- cmd_buffer->device->ws->buffer_destroy(cmd_buffer->upload.upload_bo);
+ cmd_buffer->device->ws->buffer_destroy(cmd_buffer->device->ws, cmd_buffer->upload.upload_bo);
if (cmd_buffer->cs)
cmd_buffer->device->ws->cs_destroy(cmd_buffer->cs);
list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
&cmd_buffer->upload.list, list) {
- cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
+ cmd_buffer->device->ws->buffer_destroy(cmd_buffer->device->ws, up->upload_bo);
list_del(&up->list);
free(up);
}
if (!upload) {
cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
- device->ws->buffer_destroy(bo);
+ device->ws->buffer_destroy(device->ws, bo);
return false;
}
radv_shader_variant_destroy(device, device->trap_handler_shader);
if (unlikely(device->tma_bo))
- ws->buffer_destroy(device->tma_bo);
+ ws->buffer_destroy(ws, device->tma_bo);
}
static struct radv_shader_variant *
}
if (pool->bo)
- device->ws->buffer_destroy(pool->bo);
+ device->ws->buffer_destroy(device->ws, pool->bo);
if (pool->host_bo)
vk_free2(&device->vk.alloc, pAllocator, pool->host_bo);
if (queue->continue_preamble_cs)
queue->device->ws->cs_destroy(queue->continue_preamble_cs);
if (queue->descriptor_bo)
- queue->device->ws->buffer_destroy(queue->descriptor_bo);
+ queue->device->ws->buffer_destroy(queue->device->ws, queue->descriptor_bo);
if (queue->scratch_bo)
- queue->device->ws->buffer_destroy(queue->scratch_bo);
+ queue->device->ws->buffer_destroy(queue->device->ws, queue->scratch_bo);
if (queue->esgs_ring_bo)
- queue->device->ws->buffer_destroy(queue->esgs_ring_bo);
+ queue->device->ws->buffer_destroy(queue->device->ws, queue->esgs_ring_bo);
if (queue->gsvs_ring_bo)
- queue->device->ws->buffer_destroy(queue->gsvs_ring_bo);
+ queue->device->ws->buffer_destroy(queue->device->ws, queue->gsvs_ring_bo);
if (queue->tess_rings_bo)
- queue->device->ws->buffer_destroy(queue->tess_rings_bo);
+ queue->device->ws->buffer_destroy(queue->device->ws, queue->tess_rings_bo);
if (queue->gds_bo)
- queue->device->ws->buffer_destroy(queue->gds_bo);
+ queue->device->ws->buffer_destroy(queue->device->ws, queue->gds_bo);
if (queue->gds_oa_bo)
- queue->device->ws->buffer_destroy(queue->gds_oa_bo);
+ queue->device->ws->buffer_destroy(queue->device->ws, queue->gds_oa_bo);
if (queue->compute_scratch_bo)
- queue->device->ws->buffer_destroy(queue->compute_scratch_bo);
+ queue->device->ws->buffer_destroy(queue->device->ws, queue->compute_scratch_bo);
vk_object_base_finish(&queue->base);
}
static void radv_device_finish_border_color(struct radv_device *device)
{
if (device->border_color_data.bo) {
- device->ws->buffer_destroy(device->border_color_data.bo);
+ device->ws->buffer_destroy(device->ws, device->border_color_data.bo);
mtx_destroy(&device->border_color_data.mutex);
}
radv_trap_handler_finish(device);
if (device->trace_bo)
- device->ws->buffer_destroy(device->trace_bo);
+ device->ws->buffer_destroy(device->ws, device->trace_bo);
if (device->gfx_init)
- device->ws->buffer_destroy(device->gfx_init);
+ device->ws->buffer_destroy(device->ws, device->gfx_init);
radv_device_finish_border_color(device);
return;
if (device->trace_bo)
- device->ws->buffer_destroy(device->trace_bo);
+ device->ws->buffer_destroy(device->ws, device->trace_bo);
if (device->gfx_init)
- device->ws->buffer_destroy(device->gfx_init);
+ device->ws->buffer_destroy(device->ws, device->gfx_init);
radv_device_finish_border_color(device);
if (scratch_bo != queue->scratch_bo) {
if (queue->scratch_bo)
- queue->device->ws->buffer_destroy(queue->scratch_bo);
+ queue->device->ws->buffer_destroy(queue->device->ws, queue->scratch_bo);
queue->scratch_bo = scratch_bo;
}
queue->scratch_size_per_wave = scratch_size_per_wave;
if (compute_scratch_bo != queue->compute_scratch_bo) {
if (queue->compute_scratch_bo)
- queue->device->ws->buffer_destroy(queue->compute_scratch_bo);
+ queue->device->ws->buffer_destroy(queue->device->ws, queue->compute_scratch_bo);
queue->compute_scratch_bo = compute_scratch_bo;
}
queue->compute_scratch_size_per_wave = compute_scratch_size_per_wave;
if (esgs_ring_bo != queue->esgs_ring_bo) {
if (queue->esgs_ring_bo)
- queue->device->ws->buffer_destroy(queue->esgs_ring_bo);
+ queue->device->ws->buffer_destroy(queue->device->ws, queue->esgs_ring_bo);
queue->esgs_ring_bo = esgs_ring_bo;
queue->esgs_ring_size = esgs_ring_size;
}
if (gsvs_ring_bo != queue->gsvs_ring_bo) {
if (queue->gsvs_ring_bo)
- queue->device->ws->buffer_destroy(queue->gsvs_ring_bo);
+ queue->device->ws->buffer_destroy(queue->device->ws, queue->gsvs_ring_bo);
queue->gsvs_ring_bo = gsvs_ring_bo;
queue->gsvs_ring_size = gsvs_ring_size;
}
if (descriptor_bo != queue->descriptor_bo) {
if (queue->descriptor_bo)
- queue->device->ws->buffer_destroy(queue->descriptor_bo);
+ queue->device->ws->buffer_destroy(queue->device->ws, queue->descriptor_bo);
queue->descriptor_bo = descriptor_bo;
}
if (dest_cs[i])
queue->device->ws->cs_destroy(dest_cs[i]);
if (descriptor_bo && descriptor_bo != queue->descriptor_bo)
- queue->device->ws->buffer_destroy(descriptor_bo);
+ queue->device->ws->buffer_destroy(queue->device->ws, descriptor_bo);
if (scratch_bo && scratch_bo != queue->scratch_bo)
- queue->device->ws->buffer_destroy(scratch_bo);
+ queue->device->ws->buffer_destroy(queue->device->ws, scratch_bo);
if (compute_scratch_bo && compute_scratch_bo != queue->compute_scratch_bo)
- queue->device->ws->buffer_destroy(compute_scratch_bo);
+ queue->device->ws->buffer_destroy(queue->device->ws, compute_scratch_bo);
if (esgs_ring_bo && esgs_ring_bo != queue->esgs_ring_bo)
- queue->device->ws->buffer_destroy(esgs_ring_bo);
+ queue->device->ws->buffer_destroy(queue->device->ws, esgs_ring_bo);
if (gsvs_ring_bo && gsvs_ring_bo != queue->gsvs_ring_bo)
- queue->device->ws->buffer_destroy(gsvs_ring_bo);
+ queue->device->ws->buffer_destroy(queue->device->ws, gsvs_ring_bo);
if (tess_rings_bo && tess_rings_bo != queue->tess_rings_bo)
- queue->device->ws->buffer_destroy(tess_rings_bo);
+ queue->device->ws->buffer_destroy(queue->device->ws, tess_rings_bo);
if (gds_bo && gds_bo != queue->gds_bo)
- queue->device->ws->buffer_destroy(gds_bo);
+ queue->device->ws->buffer_destroy(queue->device->ws, gds_bo);
if (gds_oa_bo && gds_oa_bo != queue->gds_oa_bo)
- queue->device->ws->buffer_destroy(gds_oa_bo);
+ queue->device->ws->buffer_destroy(queue->device->ws, gds_oa_bo);
return vk_error(queue->device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
}
if (bind->pBinds[i].memory != VK_NULL_HANDLE)
mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
- result = device->ws->buffer_virtual_bind(buffer->bo,
+ result = device->ws->buffer_virtual_bind(device->ws,
+ buffer->bo,
bind->pBinds[i].resourceOffset,
bind->pBinds[i].size,
mem ? mem->bo : NULL,
if (bind->pBinds[i].memory != VK_NULL_HANDLE)
mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
- result = device->ws->buffer_virtual_bind(image->bo,
+ result = device->ws->buffer_virtual_bind(device->ws,
+ image->bo,
bind->pBinds[i].resourceOffset,
bind->pBinds[i].size,
mem ? mem->bo : NULL,
surface->prt_tile_height);
uint32_t size = aligned_extent_width * aligned_extent_height * bs;
- result = device->ws->buffer_virtual_bind(image->bo,
+ result = device->ws->buffer_virtual_bind(device->ws,
+ image->bo,
offset,
size,
mem ? mem->bo : NULL,
uint32_t mem_increment = aligned_extent_width * bs;
uint32_t size = mem_increment * surface->prt_tile_height;
for (unsigned y = 0; y < bind_extent.height; y += surface->prt_tile_height) {
- result = device->ws->buffer_virtual_bind(image->bo,
+ result = device->ws->buffer_virtual_bind(device->ws,
+ image->bo,
offset + img_increment * y,
size,
mem ? mem->bo : NULL,
if (memory->image && memory->image->offset == 0) {
struct radeon_bo_metadata metadata;
radv_init_metadata(device, memory->image, &metadata);
- device->ws->buffer_set_metadata(memory->bo, &metadata);
+ device->ws->buffer_set_metadata(device->ws, memory->bo, &metadata);
}
return device->ws->buffer_get_fd(device->ws, memory->bo,
}
radv_bo_list_remove(device, mem->bo);
- device->ws->buffer_destroy(mem->bo);
+ device->ws->buffer_destroy(device->ws, mem->bo);
mem->bo = NULL;
}
mem->image->info.samples == 1 &&
mem->image->tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
struct radeon_bo_metadata metadata;
- device->ws->buffer_get_metadata(mem->bo, &metadata);
+ device->ws->buffer_get_metadata(device->ws, mem->bo, &metadata);
struct radv_image_create_info create_info = {
.no_metadata_planes = true,
result = radv_image_create_layout(device, create_info, NULL,
mem->image);
if (result != VK_SUCCESS) {
- device->ws->buffer_destroy(mem->bo);
+ device->ws->buffer_destroy(device->ws, mem->bo);
goto fail;
}
}
struct radv_event *event)
{
if (event->bo)
- device->ws->buffer_destroy(event->bo);
+ device->ws->buffer_destroy(device->ws, event->bo);
vk_object_base_finish(&event->base);
vk_free2(&device->vk.alloc, pAllocator, event);
struct radv_buffer *buffer)
{
if ((buffer->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) && buffer->bo)
- device->ws->buffer_destroy(buffer->bo);
+ device->ws->buffer_destroy(device->ws, buffer->bo);
vk_object_base_finish(&buffer->base);
vk_free2(&device->vk.alloc, pAllocator, buffer);
struct radv_image *image)
{
if ((image->flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) && image->bo)
- device->ws->buffer_destroy(image->bo);
+ device->ws->buffer_destroy(device->ws, image->bo);
if (image->owned_memory != VK_NULL_HANDLE) {
RADV_FROM_HANDLE(radv_device_memory, mem, image->owned_memory);
struct radv_query_pool *pool)
{
if (pool->bo)
- device->ws->buffer_destroy(pool->bo);
+ device->ws->buffer_destroy(device->ws, pool->bo);
vk_object_base_finish(&pool->base);
vk_free2(&device->vk.alloc, pAllocator, pool);
}
enum radeon_bo_flag flags,
unsigned priority);
- void (*buffer_destroy)(struct radeon_winsys_bo *bo);
+ void (*buffer_destroy)(struct radeon_winsys *ws,
+ struct radeon_winsys_bo *bo);
void *(*buffer_map)(struct radeon_winsys_bo *bo);
struct radeon_winsys_bo *(*buffer_from_ptr)(struct radeon_winsys *ws,
void (*buffer_unmap)(struct radeon_winsys_bo *bo);
- void (*buffer_set_metadata)(struct radeon_winsys_bo *bo,
+ void (*buffer_set_metadata)(struct radeon_winsys *ws,
+ struct radeon_winsys_bo *bo,
struct radeon_bo_metadata *md);
- void (*buffer_get_metadata)(struct radeon_winsys_bo *bo,
+ void (*buffer_get_metadata)(struct radeon_winsys *ws,
+ struct radeon_winsys_bo *bo,
struct radeon_bo_metadata *md);
- VkResult (*buffer_virtual_bind)(struct radeon_winsys_bo *parent,
+ VkResult (*buffer_virtual_bind)(struct radeon_winsys *ws,
+ struct radeon_winsys_bo *parent,
uint64_t offset, uint64_t size,
struct radeon_winsys_bo *bo, uint64_t bo_offset);
VkResult (*ctx_create)(struct radeon_winsys *ws,
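/* A minimal caller sketch of the updated interface (hypothetical helper, not
 * part of this patch): every buffer entry point now receives the winsys as an
 * explicit first argument instead of reaching it through a per-BO back-pointer. */
static void
example_query_and_release(struct radeon_winsys *ws, struct radeon_winsys_bo *bo)
{
   struct radeon_bo_metadata md;

   ws->buffer_get_metadata(ws, bo, &md); /* winsys first, then the BO */
   ws->buffer_destroy(ws, bo);           /* same convention for destruction */
}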
slab->ptr = (char*)device->ws->buffer_map(slab->bo);
if (!slab->ptr) {
- device->ws->buffer_destroy(slab->bo);
+ device->ws->buffer_destroy(device->ws, slab->bo);
free(slab);
return NULL;
}
radv_destroy_shader_slabs(struct radv_device *device)
{
list_for_each_entry_safe(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {
- device->ws->buffer_destroy(slab->bo);
+ device->ws->buffer_destroy(device->ws, slab->bo);
free(slab);
}
mtx_destroy(&device->shader_slab_mutex);
struct radeon_winsys *ws = device->ws;
if (unlikely(device->thread_trace.bo))
- ws->buffer_destroy(device->thread_trace.bo);
+ ws->buffer_destroy(ws, device->thread_trace.bo);
for (unsigned i = 0; i < 2; i++) {
if (device->thread_trace.start_cs[i])
void *map = device->ws->buffer_map(device->gfx_init);
if (!map) {
- device->ws->buffer_destroy(device->gfx_init);
+ device->ws->buffer_destroy(device->ws, device->gfx_init);
device->gfx_init = NULL;
goto fail;
}
#include "util/u_math.h"
#include "util/os_time.h"
-static void radv_amdgpu_winsys_bo_destroy(struct radeon_winsys_bo *_bo);
+static void radv_amdgpu_winsys_bo_destroy(struct radeon_winsys *_ws,
+ struct radeon_winsys_bo *_bo);
static int
radv_amdgpu_bo_va_op(struct radv_amdgpu_winsys *ws,
}
static void
-radv_amdgpu_winsys_virtual_map(struct radv_amdgpu_winsys_bo *bo,
+radv_amdgpu_winsys_virtual_map(struct radv_amdgpu_winsys *ws,
+ struct radv_amdgpu_winsys_bo *bo,
const struct radv_amdgpu_map_range *range)
{
uint64_t internal_flags = 0;
assert(range->size);
if (!range->bo) {
- if (!bo->ws->info.has_sparse_vm_mappings)
+ if (!ws->info.has_sparse_vm_mappings)
return;
internal_flags |= AMDGPU_VM_PAGE_PRT;
} else
p_atomic_inc(&range->bo->ref_count);
- int r = radv_amdgpu_bo_va_op(bo->ws, range->bo ? range->bo->bo : NULL,
+ int r = radv_amdgpu_bo_va_op(ws, range->bo ? range->bo->bo : NULL,
range->bo_offset, range->size,
range->offset + bo->base.va, 0,
internal_flags, AMDGPU_VA_OP_MAP);
}
static void
-radv_amdgpu_winsys_virtual_unmap(struct radv_amdgpu_winsys_bo *bo,
+radv_amdgpu_winsys_virtual_unmap(struct radv_amdgpu_winsys *ws,
+ struct radv_amdgpu_winsys_bo *bo,
const struct radv_amdgpu_map_range *range)
{
uint64_t internal_flags = 0;
assert(range->size);
if (!range->bo) {
- if(!bo->ws->info.has_sparse_vm_mappings)
+ if (!ws->info.has_sparse_vm_mappings)
return;
/* Even though this is an unmap, if we don't set this flag,
internal_flags |= AMDGPU_VM_PAGE_PRT;
}
- int r = radv_amdgpu_bo_va_op(bo->ws, range->bo ? range->bo->bo : NULL,
+ int r = radv_amdgpu_bo_va_op(ws, range->bo ? range->bo->bo : NULL,
range->bo_offset, range->size,
range->offset + bo->base.va, 0, internal_flags,
AMDGPU_VA_OP_UNMAP);
abort();
if (range->bo)
- radv_amdgpu_winsys_bo_destroy((struct radeon_winsys_bo *)range->bo);
+ ws->base.buffer_destroy(&ws->base, (struct radeon_winsys_bo *)range->bo);
}
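/* Sketch of the reference-counting contract between the two helpers above:
 * radv_amdgpu_winsys_virtual_map() takes a reference on range->bo, and
 * radv_amdgpu_winsys_virtual_unmap() drops it via ws->base.buffer_destroy(),
 * which only frees the BO once its ref_count reaches zero. */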
static int bo_comparator(const void *ap, const void *bp) {
}
static VkResult
-radv_amdgpu_winsys_bo_virtual_bind(struct radeon_winsys_bo *_parent,
+radv_amdgpu_winsys_bo_virtual_bind(struct radeon_winsys *_ws,
+ struct radeon_winsys_bo *_parent,
uint64_t offset, uint64_t size,
struct radeon_winsys_bo *_bo, uint64_t bo_offset)
{
+ struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
struct radv_amdgpu_winsys_bo *parent = (struct radv_amdgpu_winsys_bo *)_parent;
struct radv_amdgpu_winsys_bo *bo = (struct radv_amdgpu_winsys_bo*)_bo;
int range_count_delta, new_idx;
/* Any range between first and last is going to be entirely covered by the new range so just unmap them. */
for (int i = first + 1; i < last; ++i)
- radv_amdgpu_winsys_virtual_unmap(parent, parent->ranges + i);
+ radv_amdgpu_winsys_virtual_unmap(ws, parent, parent->ranges + i);
/* If the first/last ranges are not left alone we unmap them and optionally map
 * them again after modifications. Note that this implicitly can do the splitting
new_last = parent->ranges[last];
if (parent->ranges[first].offset + parent->ranges[first].size > offset || remove_first) {
- radv_amdgpu_winsys_virtual_unmap(parent, parent->ranges + first);
+ radv_amdgpu_winsys_virtual_unmap(ws, parent, parent->ranges + first);
unmapped_first = true;
if (!remove_first) {
new_first.size = offset - new_first.offset;
- radv_amdgpu_winsys_virtual_map(parent, &new_first);
+ radv_amdgpu_winsys_virtual_map(ws, parent, &new_first);
}
}
if (parent->ranges[last].offset < offset + size || remove_last) {
if (first != last || !unmapped_first)
- radv_amdgpu_winsys_virtual_unmap(parent, parent->ranges + last);
+ radv_amdgpu_winsys_virtual_unmap(ws, parent, parent->ranges + last);
if (!remove_last) {
new_last.size -= offset + size - new_last.offset;
new_last.bo_offset += (offset + size - new_last.offset);
new_last.offset = offset + size;
- radv_amdgpu_winsys_virtual_map(parent, &new_last);
+ radv_amdgpu_winsys_virtual_map(ws, parent, &new_last);
}
}
parent->ranges[new_idx].bo = bo;
parent->ranges[new_idx].bo_offset = bo_offset;
- radv_amdgpu_winsys_virtual_map(parent, parent->ranges + new_idx);
+ radv_amdgpu_winsys_virtual_map(ws, parent, parent->ranges + new_idx);
parent->range_count += range_count_delta;
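/* Worked example of the splitting above (illustrative offsets; assume B was
 * originally mapped at bo_offset 0):
 *   ranges before:  [0,64)->A   [64,192)->B   [192,256)->NULL (PRT)
 *   buffer_virtual_bind(ws, parent, 96, 64, C, 0) leaves:
 *   [0,64)->A   [64,96)->B@0   [96,160)->C@0   [160,192)->B@96   [192,256)->NULL
 * Ranges completely covered by the new bind would simply be unmapped. */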
uint8_t destroyed : 1;
};
-static void radv_amdgpu_log_bo(struct radv_amdgpu_winsys_bo *bo,
+static void radv_amdgpu_log_bo(struct radv_amdgpu_winsys *ws,
+ struct radv_amdgpu_winsys_bo *bo,
bool destroyed)
{
- struct radv_amdgpu_winsys *ws = bo->ws;
struct radv_amdgpu_winsys_bo_log *bo_log = NULL;
- if (!bo->ws->debug_log_bos)
+ if (!ws->debug_log_bos)
return;
bo_log = malloc(sizeof(*bo_log));
u_rwlock_wrunlock(&ws->log_bo_list_lock);
}
-static int radv_amdgpu_global_bo_list_add(struct radv_amdgpu_winsys_bo *bo)
+static int radv_amdgpu_global_bo_list_add(struct radv_amdgpu_winsys *ws,
+ struct radv_amdgpu_winsys_bo *bo)
{
- struct radv_amdgpu_winsys *ws = bo->ws;
-
if (!ws->debug_all_bos)
return VK_SUCCESS;
return VK_SUCCESS;
}
-static void radv_amdgpu_global_bo_list_del(struct radv_amdgpu_winsys_bo *bo)
+static void radv_amdgpu_global_bo_list_del(struct radv_amdgpu_winsys *ws,
+ struct radv_amdgpu_winsys_bo *bo)
{
- struct radv_amdgpu_winsys *ws = bo->ws;
-
if (!ws->debug_all_bos)
return;
u_rwlock_wrunlock(&ws->global_bo_list.lock);
}
-static void radv_amdgpu_winsys_bo_destroy(struct radeon_winsys_bo *_bo)
+static void radv_amdgpu_winsys_bo_destroy(struct radeon_winsys *_ws,
+ struct radeon_winsys_bo *_bo)
{
+ struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);
- struct radv_amdgpu_winsys *ws = bo->ws;
if (p_atomic_dec_return(&bo->ref_count))
return;
- radv_amdgpu_log_bo(bo, true);
+ radv_amdgpu_log_bo(ws, bo, true);
if (bo->is_virtual) {
for (uint32_t i = 0; i < bo->range_count; ++i) {
- radv_amdgpu_winsys_virtual_unmap(bo, bo->ranges + i);
+ radv_amdgpu_winsys_virtual_unmap(ws, bo, bo->ranges + i);
}
free(bo->bos);
free(bo->ranges);
} else {
- radv_amdgpu_global_bo_list_del(bo);
- radv_amdgpu_bo_va_op(bo->ws, bo->bo, 0, bo->size, bo->base.va,
+ radv_amdgpu_global_bo_list_del(ws, bo);
+ radv_amdgpu_bo_va_op(ws, bo->bo, 0, bo->size, bo->base.va,
0, 0, AMDGPU_VA_OP_UNMAP);
amdgpu_bo_free(bo->bo);
}
bo->base.va = va;
bo->va_handle = va_handle;
bo->size = size;
- bo->ws = ws;
bo->is_virtual = !!(flags & RADEON_FLAG_VIRTUAL);
bo->ref_count = 1;
bo->ranges[0].bo = NULL;
bo->ranges[0].bo_offset = 0;
- radv_amdgpu_winsys_virtual_map(bo, bo->ranges);
- radv_amdgpu_log_bo(bo, false);
+ radv_amdgpu_winsys_virtual_map(ws, bo, bo->ranges);
+ radv_amdgpu_log_bo(ws, bo, false);
return (struct radeon_winsys_bo *)bo;
}
p_atomic_add(&ws->allocated_gtt,
align64(bo->size, ws->info.gart_page_size));
- radv_amdgpu_global_bo_list_add(bo);
- radv_amdgpu_log_bo(bo, false);
+ radv_amdgpu_global_bo_list_add(ws, bo);
+ radv_amdgpu_log_bo(ws, bo, false);
return (struct radeon_winsys_bo *)bo;
error_va_map:
bo->va_handle = va_handle;
bo->size = size;
bo->ref_count = 1;
- bo->ws = ws;
bo->bo = buf_handle;
bo->base.initial_domain = RADEON_DOMAIN_GTT;
bo->priority = priority;
p_atomic_add(&ws->allocated_gtt,
align64(bo->size, ws->info.gart_page_size));
- radv_amdgpu_global_bo_list_add(bo);
- radv_amdgpu_log_bo(bo, false);
+ radv_amdgpu_global_bo_list_add(ws, bo);
+ radv_amdgpu_log_bo(ws, bo, false);
return (struct radeon_winsys_bo *)bo;
bo->base.initial_domain = initial;
bo->size = result.alloc_size;
bo->is_shared = true;
- bo->ws = ws;
bo->priority = priority;
bo->ref_count = 1;
p_atomic_add(&ws->allocated_gtt,
align64(bo->size, ws->info.gart_page_size));
- radv_amdgpu_global_bo_list_add(bo);
- radv_amdgpu_log_bo(bo, false);
+ radv_amdgpu_global_bo_list_add(ws, bo);
+ radv_amdgpu_log_bo(ws, bo, false);
return (struct radeon_winsys_bo *)bo;
error_va_map:
}
static void
-radv_amdgpu_winsys_bo_set_metadata(struct radeon_winsys_bo *_bo,
+radv_amdgpu_winsys_bo_set_metadata(struct radeon_winsys *_ws,
+ struct radeon_winsys_bo *_bo,
struct radeon_bo_metadata *md)
{
+ struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);
struct amdgpu_bo_metadata metadata = {0};
uint64_t tiling_flags = 0;
- if (bo->ws->info.chip_class >= GFX9) {
+ if (ws->info.chip_class >= GFX9) {
tiling_flags |= AMDGPU_TILING_SET(SWIZZLE_MODE, md->u.gfx9.swizzle_mode);
tiling_flags |= AMDGPU_TILING_SET(SCANOUT, md->u.gfx9.scanout);
} else {
}
static void
-radv_amdgpu_winsys_bo_get_metadata(struct radeon_winsys_bo *_bo,
+radv_amdgpu_winsys_bo_get_metadata(struct radeon_winsys *_ws,
+ struct radeon_winsys_bo *_bo,
struct radeon_bo_metadata *md)
{
+ struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);
struct amdgpu_bo_info info = {0};
uint64_t tiling_flags = info.metadata.tiling_info;
- if (bo->ws->info.chip_class >= GFX9) {
+ if (ws->info.chip_class >= GFX9) {
md->u.gfx9.swizzle_mode = AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
md->u.gfx9.scanout = AMDGPU_TILING_GET(tiling_flags, SCANOUT);
} else {
struct radeon_winsys_bo base;
amdgpu_va_handle va_handle;
uint64_t size;
- struct radv_amdgpu_winsys *ws;
bool is_virtual;
uint8_t priority;
int ref_count;
struct radv_amdgpu_cs *cs = radv_amdgpu_cs(rcs);
if (cs->ib_buffer)
- cs->ws->base.buffer_destroy(cs->ib_buffer);
+ cs->ws->base.buffer_destroy(&cs->ws->base, cs->ib_buffer);
else
free(cs->base.buf);
for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
- cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);
+ cs->ws->base.buffer_destroy(&cs->ws->base, cs->old_ib_buffers[i]);
for (unsigned i = 0; i < cs->num_old_cs_buffers; ++i) {
free(cs->old_cs_buffers[i].buf);
cs->ib_mapped = ws->buffer_map(cs->ib_buffer);
if (!cs->ib_mapped) {
- ws->buffer_destroy(cs->ib_buffer);
+ ws->buffer_destroy(ws, cs->ib_buffer);
free(cs);
return NULL;
}
cs->ib_mapped = cs->ws->base.buffer_map(cs->ib_buffer);
if (!cs->ib_mapped) {
- cs->ws->base.buffer_destroy(cs->ib_buffer);
+ cs->ws->base.buffer_destroy(&cs->ws->base, cs->ib_buffer);
cs->base.cdw = 0;
/* VK_ERROR_MEMORY_MAP_FAILED is not valid for vkEndCommandBuffer. */
cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer);
for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
- cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);
+ cs->ws->base.buffer_destroy(&cs->ws->base, cs->old_ib_buffers[i]);
cs->num_old_ib_buffers = 0;
cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
u_rwlock_rdunlock(&aws->global_bo_list.lock);
for (unsigned j = 0; j < number_of_ibs; j++) {
- ws->buffer_destroy(bos[j]);
+ ws->buffer_destroy(ws, bos[j]);
}
free(ibs);
return VK_SUCCESS;
fail_map:
- ws->base.buffer_destroy(ctx->fence_bo);
+ ws->base.buffer_destroy(&ws->base, ctx->fence_bo);
fail_alloc:
amdgpu_cs_ctx_free(ctx->ctx);
fail_create:
static void radv_amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
{
struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
- ctx->ws->base.buffer_destroy(ctx->fence_bo);
+ ctx->ws->base.buffer_destroy(&ctx->ws->base, ctx->fence_bo);
amdgpu_cs_ctx_free(ctx->ctx);
FREE(ctx);
}
{
}
-static void radv_null_winsys_bo_destroy(struct radeon_winsys_bo *_bo)
+static void radv_null_winsys_bo_destroy(struct radeon_winsys *_ws,
+ struct radeon_winsys_bo *_bo)
{
struct radv_null_winsys_bo *bo = radv_null_winsys_bo(_bo);
FREE(bo->ptr);