{
PVR_FROM_HANDLE(pvr_cmd_buffer, cmd_buffer, commandBuffer);
PVR_FROM_HANDLE(pvr_buffer, dst, dstBuffer);
- struct pvr_bo *pvr_bo;
+ struct pvr_suballoc_bo *pvr_bo;
VkResult result;
PVR_CHECK_COMMAND_BUFFER_BUILDING_STATE(cmd_buffer);
return;
pvr_cmd_copy_buffer_region(cmd_buffer,
- pvr_bo->vma->dev_addr,
+ pvr_bo->dev_addr,
0,
dst->dev_addr,
dstOffset,
if (vs_has_rt_id_output) {
const struct pvr_device_static_clear_state *dev_clear_state =
&cmd_buffer->device->static_clear_state;
- const struct pvr_bo *multi_layer_vert_bo =
+ const struct pvr_suballoc_bo *multi_layer_vert_bo =
dev_clear_state->usc_multi_layer_vertex_shader_bo;
/* We can't use the device's passthrough pds program since it doesn't
for (uint32_t j = 0; j < rect_count; j++) {
struct pvr_pds_upload pds_program_data_upload;
const VkClearRect *clear_rect = &rects[j];
- struct pvr_bo *vertices_bo;
+ struct pvr_suballoc_bo *vertices_bo;
uint32_t *vdm_cs_buffer;
VkResult result;
VkResult pvr_clear_vertices_upload(struct pvr_device *device,
const VkRect2D *rect,
float depth,
- struct pvr_bo **const pvr_bo_out)
+ struct pvr_suballoc_bo **const pvr_bo_out)
{
const float y1 = (float)(rect->offset.y + rect->extent.height);
const float x1 = (float)(rect->offset.x + rect->extent.width);
return VK_SUCCESS;
err_free_pds_program:
- pvr_bo_free(device, state->pds.pvr_bo);
+ pvr_bo_suballoc_free(state->pds.pvr_bo);
err_free_vertices_buffer:
- pvr_bo_free(device, state->vertices_bo);
+ pvr_bo_suballoc_free(state->vertices_bo);
err_free_usc_shader:
- pvr_bo_free(device, state->usc_vertex_shader_bo);
+ pvr_bo_suballoc_free(state->usc_vertex_shader_bo);
err_free_usc_multi_layer_shader:
- pvr_bo_free(device, state->usc_multi_layer_vertex_shader_bo);
+ pvr_bo_suballoc_free(state->usc_multi_layer_vertex_shader_bo);
return result;
}
pvr_device_finish_clear_attachment_programs(device);
- pvr_bo_free(device, state->pds.pvr_bo);
- pvr_bo_free(device, state->vertices_bo);
- pvr_bo_free(device, state->usc_vertex_shader_bo);
- pvr_bo_free(device, state->usc_multi_layer_vertex_shader_bo);
+ pvr_bo_suballoc_free(state->pds.pvr_bo);
+ pvr_bo_suballoc_free(state->vertices_bo);
+ pvr_bo_suballoc_free(state->usc_vertex_shader_bo);
+ pvr_bo_suballoc_free(state->usc_multi_layer_vertex_shader_bo);
}
void pvr_pds_clear_vertex_shader_program_init_base(
struct pvr_pds_vertex_shader_program *program,
- const struct pvr_bo *usc_shader_bo)
+ const struct pvr_suballoc_bo *usc_shader_bo)
{
*program = (struct pvr_pds_vertex_shader_program){
.num_streams = 1,
};
pvr_pds_setup_doutu(&program->usc_task_control,
- usc_shader_bo->vma->dev_addr.addr,
+ usc_shader_bo->dev_addr.addr,
0,
PVRX(PDSINST_DOUTU_SAMPLE_RATE_INSTANCE),
false);
VkResult pvr_pds_clear_vertex_shader_program_create_and_upload(
struct pvr_pds_vertex_shader_program *program,
struct pvr_device *device,
- const struct pvr_bo *vertices_bo,
+ const struct pvr_suballoc_bo *vertices_bo,
struct pvr_pds_upload *const upload_out)
{
const struct pvr_device_info *dev_info = &device->pdevice->dev_info;
uint32_t *staging_buffer;
VkResult result;
- program->streams[0].address = vertices_bo->vma->dev_addr.addr;
+ program->streams[0].address = vertices_bo->dev_addr.addr;
pvr_pds_vertex_shader(program, NULL, PDS_GENERATE_SIZES, dev_info);
VkResult pvr_pds_clear_vertex_shader_program_create_and_upload_data(
struct pvr_pds_vertex_shader_program *program,
struct pvr_cmd_buffer *cmd_buffer,
- struct pvr_bo *vertices_bo,
+ struct pvr_suballoc_bo *vertices_bo,
struct pvr_pds_upload *const pds_upload_out)
{
struct pvr_device_info *dev_info = &cmd_buffer->device->pdevice->dev_info;
uint32_t *staging_buffer;
VkResult result;
- program->streams[0].address = vertices_bo->vma->dev_addr.addr;
+ program->streams[0].address = vertices_bo->dev_addr.addr;
pvr_pds_vertex_shader(program, NULL, PDS_GENERATE_SIZES, dev_info);
void pvr_pds_clear_rta_vertex_shader_program_init_base(
struct pvr_pds_vertex_shader_program *program,
- const struct pvr_bo *usc_shader_bo)
+ const struct pvr_suballoc_bo *usc_shader_bo)
{
pvr_pds_clear_vertex_shader_program_init_base(program, usc_shader_bo);
void pvr_pds_clear_vertex_shader_program_init_base(
struct pvr_pds_vertex_shader_program *program,
- const struct pvr_bo *usc_shader_bo);
+ const struct pvr_suballoc_bo *usc_shader_bo);
VkResult pvr_pds_clear_vertex_shader_program_create_and_upload(
struct pvr_pds_vertex_shader_program *program,
struct pvr_device *device,
- const struct pvr_bo *vertices_bo,
+ const struct pvr_suballoc_bo *vertices_bo,
struct pvr_pds_upload *const upload_out);
VkResult pvr_pds_clear_vertex_shader_program_create_and_upload_data(
struct pvr_pds_vertex_shader_program *program,
struct pvr_cmd_buffer *cmd_buffer,
- struct pvr_bo *vertices_bo,
+ struct pvr_suballoc_bo *vertices_bo,
struct pvr_pds_upload *const pds_upload_out);
void pvr_pds_clear_rta_vertex_shader_program_init_base(
struct pvr_pds_vertex_shader_program *program,
- const struct pvr_bo *usc_shader_bo);
+ const struct pvr_suballoc_bo *usc_shader_bo);
/* Each code and data upload function clears the other's fields in the
* pds_upload_out. So when uploading the code, the data fields will be 0.
pvr_pds_clear_rta_vertex_shader_program_create_and_upload_data(
struct pvr_pds_vertex_shader_program *program,
struct pvr_cmd_buffer *cmd_buffer,
- struct pvr_bo *vertices_bo,
+ struct pvr_suballoc_bo *vertices_bo,
struct pvr_pds_upload *const pds_upload_out)
{
return pvr_pds_clear_vertex_shader_program_create_and_upload_data(
VkResult pvr_clear_vertices_upload(struct pvr_device *device,
const VkRect2D *rect,
float depth,
- struct pvr_bo **const pvr_bo_out);
+ struct pvr_suballoc_bo **const pvr_bo_out);
/* TODO: Create pvr_blit.h, rename this, and move it there? */
/* This is provided by pvr_blit.c instead of the usual pvr_clear.c . */
util_dynarray_fini(&sub_cmd->gfx.sec_query_indices);
pvr_csb_finish(&sub_cmd->gfx.control_stream);
pvr_bo_free(cmd_buffer->device, sub_cmd->gfx.terminate_ctrl_stream);
- pvr_bo_free(cmd_buffer->device, sub_cmd->gfx.depth_bias_bo);
- pvr_bo_free(cmd_buffer->device, sub_cmd->gfx.scissor_bo);
+ pvr_bo_suballoc_free(sub_cmd->gfx.depth_bias_bo);
+ pvr_bo_suballoc_free(sub_cmd->gfx.scissor_bo);
break;
case PVR_SUB_CMD_TYPE_COMPUTE:
pvr_cmd_buffer_free_sub_cmds(cmd_buffer);
- list_for_each_entry_safe (struct pvr_bo, bo, &cmd_buffer->bo_list, link) {
- list_del(&bo->link);
- pvr_bo_free(cmd_buffer->device, bo);
+ list_for_each_entry_safe (struct pvr_suballoc_bo,
+ suballoc_bo,
+ &cmd_buffer->bo_list,
+ link) {
+ list_del(&suballoc_bo->link);
+ pvr_bo_suballoc_free(suballoc_bo);
}
util_dynarray_fini(&cmd_buffer->deferred_clears);
return VK_SUCCESS;
err_free_depth_bias_bo:
- pvr_bo_free(device, sub_cmd->depth_bias_bo);
+ pvr_bo_suballoc_free(sub_cmd->depth_bias_bo);
sub_cmd->depth_bias_bo = NULL;
return result;
csb->stream_type == PVR_CMD_STREAM_TYPE_GRAPHICS_DEFERRED);
pvr_csb_emit (csb, VDMCTRL_PPP_STATE0, state0) {
- state0.addrmsb = framebuffer->ppp_state_bo->vma->dev_addr;
+ state0.addrmsb = framebuffer->ppp_state_bo->dev_addr;
state0.word_count = framebuffer->ppp_state_size;
}
pvr_csb_emit (csb, VDMCTRL_PPP_STATE1, state1) {
- state1.addrlsb = framebuffer->ppp_state_bo->vma->dev_addr;
+ state1.addrlsb = framebuffer->ppp_state_bo->dev_addr;
}
return csb->status;
}
-VkResult pvr_cmd_buffer_upload_general(struct pvr_cmd_buffer *const cmd_buffer,
- const void *const data,
- const size_t size,
- struct pvr_bo **const pvr_bo_out)
+VkResult
+pvr_cmd_buffer_upload_general(struct pvr_cmd_buffer *const cmd_buffer,
+ const void *const data,
+ const size_t size,
+ struct pvr_suballoc_bo **const pvr_bo_out)
{
struct pvr_device *const device = cmd_buffer->device;
const uint32_t cache_line_size =
rogue_get_slc_cache_line_size(&device->pdevice->dev_info);
- struct pvr_bo *pvr_bo;
+ struct pvr_suballoc_bo *suballoc_bo;
VkResult result;
result = pvr_gpu_upload(device,
data,
size,
cache_line_size,
- &pvr_bo);
+ &suballoc_bo);
if (result != VK_SUCCESS) {
cmd_buffer->state.status = result;
return result;
}
- list_add(&pvr_bo->link, &cmd_buffer->bo_list);
+ list_add(&suballoc_bo->link, &cmd_buffer->bo_list);
- *pvr_bo_out = pvr_bo;
+ *pvr_bo_out = suballoc_bo;
return VK_SUCCESS;
}
const void *const code,
const size_t code_size,
uint64_t code_alignment,
- struct pvr_bo **const pvr_bo_out)
+ struct pvr_suballoc_bo **const pvr_bo_out)
{
struct pvr_device *const device = cmd_buffer->device;
const uint32_t cache_line_size =
rogue_get_slc_cache_line_size(&device->pdevice->dev_info);
- struct pvr_bo *pvr_bo;
+ struct pvr_suballoc_bo *suballoc_bo;
VkResult result;
code_alignment = MAX2(code_alignment, cache_line_size);
result =
- pvr_gpu_upload_usc(device, code, code_size, code_alignment, &pvr_bo);
+ pvr_gpu_upload_usc(device, code, code_size, code_alignment, &suballoc_bo);
if (result != VK_SUCCESS) {
cmd_buffer->state.status = result;
return result;
}
- list_add(&pvr_bo->link, &cmd_buffer->bo_list);
+ list_add(&suballoc_bo->link, &cmd_buffer->bo_list);
- *pvr_bo_out = pvr_bo;
+ *pvr_bo_out = suballoc_bo;
return VK_SUCCESS;
}
PVR_DW_TO_BYTES(cmd_buffer->device->pixel_event_data_size_in_dwords);
const VkAllocationCallbacks *const allocator = &cmd_buffer->vk.pool->alloc;
struct pvr_device *const device = cmd_buffer->device;
+ struct pvr_suballoc_bo *usc_eot_program = NULL;
struct util_dynarray eot_program_bin;
- struct pvr_bo *usc_eot_program = NULL;
uint32_t *staging_buffer;
uint32_t usc_temp_count;
VkResult result;
return result;
pvr_pds_setup_doutu(&pixel_event_program.task_control,
- usc_eot_program->vma->dev_addr.addr,
+ usc_eot_program->dev_addr.addr,
usc_temp_count,
PVRX(PDSINST_DOUTU_SAMPLE_RATE_INSTANCE),
false);
err_free_usc_pixel_program:
list_del(&usc_eot_program->link);
- pvr_bo_free(device, usc_eot_program);
+ pvr_bo_suballoc_free(usc_eot_program);
return result;
}
&hw_render->color_init[0];
const VkClearValue *clear_value =
&render_pass_info->clear_values[color_init->index];
+ struct pvr_suballoc_bo *clear_bo;
uint32_t attachment_count;
- struct pvr_bo *clear_bo;
bool has_depth_clear;
bool has_depth_load;
VkResult result;
if (result != VK_SUCCESS)
return result;
- *addr_out = clear_bo->vma->dev_addr;
+ *addr_out = clear_bo->dev_addr;
return VK_SUCCESS;
}
job->border_colour_table_addr = PVR_DEV_ADDR_INVALID;
if (sub_cmd->depth_bias_bo)
- job->depth_bias_table_addr = sub_cmd->depth_bias_bo->vma->dev_addr;
+ job->depth_bias_table_addr = sub_cmd->depth_bias_bo->dev_addr;
else
job->depth_bias_table_addr = PVR_DEV_ADDR_INVALID;
if (sub_cmd->scissor_bo)
- job->scissor_table_addr = sub_cmd->scissor_bo->vma->dev_addr;
+ job->scissor_table_addr = sub_cmd->scissor_bo->dev_addr;
else
job->scissor_table_addr = PVR_DEV_ADDR_INVALID;
struct pvr_sub_cmd *sub_cmd = state->current_sub_cmd;
struct pvr_device *device = cmd_buffer->device;
const struct pvr_query_pool *query_pool = NULL;
- struct pvr_bo *query_indices_bo = NULL;
+ struct pvr_suballoc_bo *query_bo = NULL;
size_t query_indices_size = 0;
VkResult result;
result = pvr_cmd_buffer_upload_general(cmd_buffer,
data,
query_indices_size,
- &query_indices_bo);
+ &query_bo);
if (result != VK_SUCCESS) {
state->status = result;
return result;
struct pvr_sub_cmd_event *sub_cmd;
struct pvr_query_info query_info;
- assert(query_indices_bo);
+ assert(query_bo);
assert(query_indices_size);
query_info.type = PVR_QUERY_TYPE_AVAILABILITY_WRITE;
/* sizeof(uint32_t) is for the size of single query. */
query_info.availability_write.num_query_indices =
query_indices_size / sizeof(uint32_t);
- query_info.availability_write.index_bo = query_indices_bo;
+ query_info.availability_write.index_bo = query_bo;
query_info.availability_write.num_queries = query_pool->query_count;
query_info.availability_write.availability_bo =
const struct pvr_const_map_entry_doutu_address *const doutu_addr =
(struct pvr_const_map_entry_doutu_address *)entries;
const pvr_dev_addr_t exec_addr =
- PVR_DEV_ADDR_OFFSET(vertex_state->bo->vma->dev_addr,
+ PVR_DEV_ADDR_OFFSET(vertex_state->bo->dev_addr,
vertex_state->entry_offset);
uint64_t addr = 0ULL;
switch (special_buff_entry->buffer_type) {
case PVR_BUFFER_TYPE_COMPILE_TIME: {
- uint64_t addr = descriptor_state->static_consts->vma->dev_addr.addr;
+ uint64_t addr = descriptor_state->static_consts->dev_addr.addr;
PVR_WRITE(qword_buffer,
addr,
{
uint64_t bound_desc_sets[PVR_MAX_DESCRIPTOR_SETS];
const struct pvr_descriptor_state *desc_state;
+ struct pvr_suballoc_bo *suballoc_bo;
uint32_t dynamic_offset_idx = 0;
- struct pvr_bo *bo;
VkResult result;
switch (stage) {
result = pvr_cmd_buffer_upload_general(cmd_buffer,
bound_desc_sets,
sizeof(bound_desc_sets),
- &bo);
+ &suballoc_bo);
if (result != VK_SUCCESS)
return result;
- *addr_out = bo->vma->dev_addr;
+ *addr_out = suballoc_bo->dev_addr;
return VK_SUCCESS;
}
cmd_buffer->vk.dynamic_graphics_state.cb.blend_constants;
size_t size =
sizeof(cmd_buffer->vk.dynamic_graphics_state.cb.blend_constants);
- struct pvr_bo *blend_consts_bo;
+ struct pvr_suballoc_bo *blend_consts_bo;
result = pvr_cmd_buffer_upload_general(cmd_buffer,
blend_consts,
if (result != VK_SUCCESS)
return result;
- *addr_out = blend_consts_bo->vma->dev_addr;
+ *addr_out = blend_consts_bo->dev_addr;
break;
}
static VkResult pvr_cmd_upload_push_consts(struct pvr_cmd_buffer *cmd_buffer)
{
struct pvr_cmd_buffer_state *state = &cmd_buffer->state;
- struct pvr_bo *bo;
+ struct pvr_suballoc_bo *suballoc_bo;
VkResult result;
/* TODO: Here are some possible optimizations/things to consider:
result = pvr_cmd_buffer_upload_general(cmd_buffer,
state->push_constants.data,
sizeof(state->push_constants.data),
- &bo);
+ &suballoc_bo);
if (result != VK_SUCCESS)
return result;
- cmd_buffer->state.push_constants.dev_addr = bo->vma->dev_addr;
+ cmd_buffer->state.push_constants.dev_addr = suballoc_bo->dev_addr;
cmd_buffer->state.push_constants.uploaded = true;
return VK_SUCCESS;
if (indirect_addr.addr) {
descriptor_data_offset_out = indirect_addr;
} else {
- struct pvr_bo *num_workgroups_bo;
+ struct pvr_suballoc_bo *num_workgroups_bo;
result = pvr_cmd_buffer_upload_general(cmd_buffer,
workgroup_size,
if (result != VK_SUCCESS)
return;
- descriptor_data_offset_out = num_workgroups_bo->vma->dev_addr;
+ descriptor_data_offset_out = num_workgroups_bo->dev_addr;
}
result = pvr_setup_descriptor_mappings(
prim_db_elems + cmd->dbsc.state.depthbias_index;
const uint32_t num_dwords =
pvr_cmd_length(TA_STATE_HEADER) + pvr_cmd_length(TA_STATE_ISPDBSC);
+ struct pvr_suballoc_bo *suballoc_bo;
uint32_t ppp_state[num_dwords];
- struct pvr_bo *pvr_bo;
VkResult result;
pvr_csb_pack (&ppp_state[0], TA_STATE_HEADER, header) {
result = pvr_cmd_buffer_upload_general(cmd_buffer,
&ppp_state[0],
sizeof(ppp_state),
- &pvr_bo);
+ &suballoc_bo);
if (result != VK_SUCCESS)
return result;
pvr_csb_pack (&cmd->dbsc.vdm_state[0], VDMCTRL_PPP_STATE0, state) {
state.word_count = num_dwords;
- state.addrmsb = pvr_bo->vma->dev_addr;
+ state.addrmsb = suballoc_bo->dev_addr;
}
pvr_csb_pack (&cmd->dbsc.vdm_state[1], VDMCTRL_PPP_STATE1, state) {
- state.addrlsb = pvr_bo->vma->dev_addr;
+ state.addrlsb = suballoc_bo->dev_addr;
}
break;
*/
#define PVR_BUFFER_MEMORY_PADDING_SIZE 4

/* Default size in bytes used by pvr_CreateDevice() for setting up the
 * suballoc_general, suballoc_pds and suballoc_usc suballocators.
 *
 * TODO: Investigate if a different default size can improve the overall
 * performance of internal driver allocations.
 */
#define PVR_SUBALLOCATOR_GENERAL_SIZE (128 * 1024)
#define PVR_SUBALLOCATOR_PDS_SIZE (128 * 1024)
#define PVR_SUBALLOCATOR_USC_SIZE (128 * 1024)

struct pvr_drm_device_info {
const char *name;
size_t len;
8,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (!staging_buffer) {
- pvr_bo_free(device, sw_compute_barrier_upload_out->pvr_bo);
+ pvr_bo_suballoc_free(sw_compute_barrier_upload_out->pvr_bo);
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
}
upload_out);
if (result != VK_SUCCESS) {
vk_free(&device->vk.alloc, staging_buffer);
- pvr_bo_free(device, sw_compute_barrier_upload_out->pvr_bo);
+ pvr_bo_suballoc_free(sw_compute_barrier_upload_out->pvr_bo);
return result;
}
/* Generate and upload PDS programs. */
result = pvr_pds_idfwdf_programs_create_and_upload(
device,
- device->idfwdf_state.usc->vma->dev_addr,
+ device->idfwdf_state.usc->dev_addr,
usc_shareds,
usc_temps,
device->idfwdf_state.shareds_bo->vma->dev_addr,
pvr_bo_free(device, device->idfwdf_state.store_bo);
err_free_usc_program:
- pvr_bo_free(device, device->idfwdf_state.usc);
+ pvr_bo_suballoc_free(device->idfwdf_state.usc);
return result;
}
/* Tears down the compute idfwdf state: the PDS kick/barrier programs and the
 * USC program are suballocations, while the shareds and store buffers remain
 * full pvr_bo allocations.
 */
static void pvr_device_finish_compute_idfwdf_state(struct pvr_device *device)
{
   pvr_bo_suballoc_free(device->idfwdf_state.pds.pvr_bo);
   pvr_bo_suballoc_free(device->idfwdf_state.sw_compute_barrier_pds.pvr_bo);

   /* Still full pvr_bo allocations, not suballocations. */
   pvr_bo_free(device, device->idfwdf_state.shareds_bo);
   pvr_bo_free(device, device->idfwdf_state.store_bo);

   pvr_bo_suballoc_free(device->idfwdf_state.usc);
}
/* FIXME: We should be calculating the size when we upload the code in
/* Setup a PDS program that kicks the static USC program. */
pvr_pds_setup_doutu(&program.usc_task_control,
- device->nop_program.usc->vma->dev_addr.addr,
+ device->nop_program.usc->dev_addr.addr,
0U,
PVRX(PDSINST_DOUTU_SAMPLE_RATE_INSTANCE),
false);
vk_free(&device->vk.alloc, staging_buffer);
err_free_nop_usc_bo:
- pvr_bo_free(device, device->nop_program.usc);
+ pvr_bo_suballoc_free(device->nop_program.usc);
return result;
}
if (result != VK_SUCCESS)
goto err_pvr_winsys_destroy;
+ pvr_bo_suballocator_init(&device->suballoc_general,
+ device->heaps.general_heap,
+ device,
+ PVR_SUBALLOCATOR_GENERAL_SIZE);
+ pvr_bo_suballocator_init(&device->suballoc_pds,
+ device->heaps.pds_heap,
+ device,
+ PVR_SUBALLOCATOR_PDS_SIZE);
+ pvr_bo_suballocator_init(&device->suballoc_usc,
+ device->heaps.usc_heap,
+ device,
+ PVR_SUBALLOCATOR_USC_SIZE);
+
if (p_atomic_inc_return(&instance->active_device_count) >
PVR_SECONDARY_DEVICE_THRESHOLD) {
initial_free_list_size = PVR_SECONDARY_DEVICE_FREE_LIST_INITAL_SIZE;
pvr_device_destroy_compute_query_programs(device);
err_pvr_free_compute_empty:
- pvr_bo_free(device, device->pds_compute_empty_program.pvr_bo);
+ pvr_bo_suballoc_free(device->pds_compute_empty_program.pvr_bo);
err_pvr_free_compute_fence:
- pvr_bo_free(device, device->pds_compute_fence_program.pvr_bo);
+ pvr_bo_suballoc_free(device->pds_compute_fence_program.pvr_bo);
err_pvr_free_nop_program:
- pvr_bo_free(device, device->nop_program.pds.pvr_bo);
- pvr_bo_free(device, device->nop_program.usc);
+ pvr_bo_suballoc_free(device->nop_program.pds.pvr_bo);
+ pvr_bo_suballoc_free(device->nop_program.usc);
err_pvr_free_list_destroy:
pvr_free_list_destroy(device->global_free_list);
err_dec_device_count:
p_atomic_dec(&device->instance->active_device_count);
+ pvr_bo_suballocator_fini(&device->suballoc_usc);
+ pvr_bo_suballocator_fini(&device->suballoc_pds);
+ pvr_bo_suballocator_fini(&device->suballoc_general);
+
pvr_bo_store_destroy(device);
err_pvr_winsys_destroy:
pvr_device_finish_graphics_static_clear_state(device);
pvr_device_finish_compute_idfwdf_state(device);
pvr_device_destroy_compute_query_programs(device);
- pvr_bo_free(device, device->pds_compute_empty_program.pvr_bo);
- pvr_bo_free(device, device->pds_compute_fence_program.pvr_bo);
- pvr_bo_free(device, device->nop_program.pds.pvr_bo);
- pvr_bo_free(device, device->nop_program.usc);
+ pvr_bo_suballoc_free(device->pds_compute_empty_program.pvr_bo);
+ pvr_bo_suballoc_free(device->pds_compute_fence_program.pvr_bo);
+ pvr_bo_suballoc_free(device->nop_program.pds.pvr_bo);
+ pvr_bo_suballoc_free(device->nop_program.usc);
pvr_free_list_destroy(device->global_free_list);
+ pvr_bo_suballocator_fini(&device->suballoc_usc);
+ pvr_bo_suballocator_fini(&device->suballoc_pds);
+ pvr_bo_suballocator_fini(&device->suballoc_general);
pvr_bo_store_destroy(device);
pvr_winsys_destroy(device->ws);
const void *data,
size_t size,
uint64_t alignment,
- struct pvr_bo **const pvr_bo_out)
+ struct pvr_suballoc_bo **const pvr_bo_out)
{
- struct pvr_bo *pvr_bo = NULL;
+ struct pvr_suballoc_bo *suballoc_bo = NULL;
+ struct pvr_suballocator *allocator;
VkResult result;
+ void *map;
assert(size > 0);
- result = pvr_bo_alloc(device,
- heap,
- size,
- alignment,
- PVR_BO_ALLOC_FLAG_CPU_MAPPED,
- &pvr_bo);
+ if (heap == device->heaps.general_heap)
+ allocator = &device->suballoc_general;
+ else if (heap == device->heaps.pds_heap)
+ allocator = &device->suballoc_pds;
+ else if (heap == device->heaps.usc_heap)
+ allocator = &device->suballoc_usc;
+ else
+ unreachable("Unknown heap type");
+
+ result = pvr_bo_suballoc(allocator, size, alignment, false, &suballoc_bo);
if (result != VK_SUCCESS)
return result;
- memcpy(pvr_bo->bo->map, data, size);
- pvr_bo_cpu_unmap(device, pvr_bo);
+ map = pvr_bo_suballoc_get_map_addr(suballoc_bo);
+ memcpy(map, data, size);
- *pvr_bo_out = pvr_bo;
+ *pvr_bo_out = suballoc_bo;
return VK_SUCCESS;
}
const void *code,
size_t code_size,
uint64_t code_alignment,
- struct pvr_bo **const pvr_bo_out)
+ struct pvr_suballoc_bo **const pvr_bo_out)
{
- struct pvr_bo *pvr_bo = NULL;
+ struct pvr_suballoc_bo *suballoc_bo = NULL;
VkResult result;
+ void *map;
assert(code_size > 0);
* instruction to prevent reading off the end of a page into a potentially
* unallocated page.
*/
- result = pvr_bo_alloc(device,
- device->heaps.usc_heap,
- code_size + ROGUE_MAX_INSTR_BYTES,
- code_alignment,
- PVR_BO_ALLOC_FLAG_CPU_MAPPED,
- &pvr_bo);
+ result = pvr_bo_suballoc(&device->suballoc_usc,
+ code_size + ROGUE_MAX_INSTR_BYTES,
+ code_alignment,
+ false,
+ &suballoc_bo);
if (result != VK_SUCCESS)
return result;
- memcpy(pvr_bo->bo->map, code, code_size);
- pvr_bo_cpu_unmap(device, pvr_bo);
+ map = pvr_bo_suballoc_get_map_addr(suballoc_bo);
+ memcpy(map, code, code_size);
- *pvr_bo_out = pvr_bo;
+ *pvr_bo_out = suballoc_bo;
return VK_SUCCESS;
}
const uint64_t bo_alignment = MAX2(min_alignment, data_alignment);
const uint64_t bo_size = (!!code) ? (code_offset + code_aligned_size)
: data_aligned_size;
- const uint64_t bo_flags = PVR_BO_ALLOC_FLAG_CPU_MAPPED |
- PVR_BO_ALLOC_FLAG_ZERO_ON_ALLOC;
VkResult result;
+ void *map;
assert(code || data);
assert(!code || (code_size_dwords != 0 && code_alignment != 0));
assert(!data || (data_size_dwords != 0 && data_alignment != 0));
- result = pvr_bo_alloc(device,
- device->heaps.pds_heap,
- bo_size,
- bo_alignment,
- bo_flags,
- &pds_upload_out->pvr_bo);
+ result = pvr_bo_suballoc(&device->suballoc_pds,
+ bo_size,
+ bo_alignment,
+ true,
+ &pds_upload_out->pvr_bo);
if (result != VK_SUCCESS)
return result;
+ map = pvr_bo_suballoc_get_map_addr(pds_upload_out->pvr_bo);
+
if (data) {
- memcpy(pds_upload_out->pvr_bo->bo->map, data, data_size);
+ memcpy(map, data, data_size);
- pds_upload_out->data_offset = pds_upload_out->pvr_bo->vma->dev_addr.addr -
+ pds_upload_out->data_offset = pds_upload_out->pvr_bo->dev_addr.addr -
device->heaps.pds_heap->base_addr.addr;
/* Store data size in dwords. */
}
if (code) {
- memcpy((uint8_t *)pds_upload_out->pvr_bo->bo->map + code_offset,
- code,
- code_size);
+ memcpy((uint8_t *)map + code_offset, code, code_size);
pds_upload_out->code_offset =
- (pds_upload_out->pvr_bo->vma->dev_addr.addr + code_offset) -
+ (pds_upload_out->pvr_bo->dev_addr.addr + code_offset) -
device->heaps.pds_heap->base_addr.addr;
/* Store code size in dwords. */
pds_upload_out->code_size = 0;
}
- pvr_bo_cpu_unmap(device, pds_upload_out->pvr_bo);
-
return VK_SUCCESS;
}
pvr_render_targets_fini(framebuffer->render_targets, render_targets_count);
err_free_ppp_state_bo:
- pvr_bo_free(device, framebuffer->ppp_state_bo);
+ pvr_bo_suballoc_free(framebuffer->ppp_state_bo);
err_free_framebuffer:
vk_object_base_finish(&framebuffer->base);
pvr_spm_scratch_buffer_release(device, framebuffer->scratch_buffer);
pvr_render_targets_fini(framebuffer->render_targets,
framebuffer->render_targets_count);
- pvr_bo_free(device, framebuffer->ppp_state_bo);
+ pvr_bo_suballoc_free(framebuffer->ppp_state_bo);
vk_object_base_finish(&framebuffer->base);
vk_free2(&device->vk.alloc, pAllocator, framebuffer);
}
return VK_SUCCESS;
err_free_pds_store_program:
- pvr_bo_free(device, pt_programs->pds_store_program.pvr_bo);
+ pvr_bo_suballoc_free(pt_programs->pds_store_program.pvr_bo);
err_free_store_resume_state_bo:
pvr_bo_free(device, pt_programs->store_resume_state_bo);
pvr_render_job_pt_programs_cleanup(struct pvr_device *device,
                                   struct rogue_pt_programs *pt_programs)
{
   pvr_bo_suballoc_free(pt_programs->pds_resume_program.pvr_bo);
   pvr_bo_suballoc_free(pt_programs->pds_store_program.pvr_bo);

   /* store_resume_state_bo is still a full pvr_bo allocation. */
   pvr_bo_free(device, pt_programs->store_resume_state_bo);
}
goto err_free_store_load_state_bo;
usc_store_program_upload_offset =
- sr_programs->usc.store_program_bo->vma->dev_addr.addr -
+ sr_programs->usc.store_program_bo->dev_addr.addr -
device->heaps.usc_heap->base_addr.addr;
/* USC state update: SR state load. */
goto err_free_usc_store_program_bo;
usc_load_program_upload_offset =
- sr_programs->usc.load_program_bo->vma->dev_addr.addr -
+ sr_programs->usc.load_program_bo->dev_addr.addr -
device->heaps.usc_heap->base_addr.addr;
/* FIXME: The number of USC temps should be output alongside
return VK_SUCCESS;
err_free_pds_store_program_bo:
- pvr_bo_free(device, sr_programs->pds.store_program.pvr_bo);
+ pvr_bo_suballoc_free(sr_programs->pds.store_program.pvr_bo);
err_free_usc_load_program_bo:
- pvr_bo_free(device, sr_programs->usc.load_program_bo);
+ pvr_bo_suballoc_free(sr_programs->usc.load_program_bo);
err_free_usc_store_program_bo:
- pvr_bo_free(device, sr_programs->usc.store_program_bo);
+ pvr_bo_suballoc_free(sr_programs->usc.store_program_bo);
err_free_store_load_state_bo:
pvr_bo_free(device, sr_programs->store_load_state_bo);
/* Frees a context's shader-register save/restore programs: the PDS and USC
 * program suballocations plus the (full pvr_bo) store/load state buffer.
 */
static void pvr_ctx_sr_programs_cleanup(struct pvr_device *device,
                                        struct rogue_sr_programs *sr_programs)
{
   pvr_bo_suballoc_free(sr_programs->pds.load_program.pvr_bo);
   pvr_bo_suballoc_free(sr_programs->pds.store_program.pvr_bo);
   pvr_bo_suballoc_free(sr_programs->usc.load_program_bo);
   pvr_bo_suballoc_free(sr_programs->usc.store_program_bo);

   /* store_load_state_bo is still a full pvr_bo allocation. */
   pvr_bo_free(device, sr_programs->store_load_state_bo);
}
pvr_ctx_reset_cmd_fini(device, &ctx->reset_cmd);
err_free_pds_fence_terminate_program:
- pvr_bo_free(device, ctx->ctx_switch.sr_fence_terminate_program.pvr_bo);
+ pvr_bo_suballoc_free(ctx->ctx_switch.sr_fence_terminate_program.pvr_bo);
err_free_sr_programs:
for (uint32_t i = 0; i < ARRAY_SIZE(ctx->ctx_switch.sr); ++i)
pvr_ctx_reset_cmd_fini(device, &ctx->reset_cmd);
- pvr_bo_free(device, ctx->ctx_switch.sr_fence_terminate_program.pvr_bo);
+ pvr_bo_suballoc_free(ctx->ctx_switch.sr_fence_terminate_program.pvr_bo);
for (uint32_t i = 0; i < ARRAY_SIZE(ctx->ctx_switch.sr); ++i)
pvr_ctx_sr_programs_cleanup(device, &ctx->ctx_switch.sr[i]);
util_dynarray_fini(&eot_bin);
if (result != VK_SUCCESS) {
for (uint32_t j = 0; j < i; j++)
- pvr_bo_free(device, ctx->usc_eot_bos[j]);
+ pvr_bo_suballoc_free(ctx->usc_eot_bos[j]);
return result;
}
struct pvr_transfer_ctx *ctx)
{
for (uint32_t i = 0; i < ARRAY_SIZE(ctx->usc_eot_bos); i++)
- pvr_bo_free(device, ctx->usc_eot_bos[i]);
+ pvr_bo_suballoc_free(ctx->usc_eot_bos[i]);
}
static VkResult pvr_transfer_ctx_shaders_init(struct pvr_device *device,
if (!ctx->pds_unitex_code[i][j].pvr_bo)
continue;
- pvr_bo_free(device, ctx->pds_unitex_code[i][j].pvr_bo);
+ pvr_bo_suballoc_free(ctx->pds_unitex_code[i][j].pvr_bo);
}
}
if (!ctx->pds_unitex_code[i][j].pvr_bo)
continue;
- pvr_bo_free(device, ctx->pds_unitex_code[i][j].pvr_bo);
+ pvr_bo_suballoc_free(ctx->pds_unitex_code[i][j].pvr_bo);
}
}
struct {
uint8_t unified_size;
- struct pvr_bo *store_program_bo;
+ struct pvr_suballoc_bo *store_program_bo;
- struct pvr_bo *load_program_bo;
+ struct pvr_suballoc_bo *load_program_bo;
} usc;
struct {
struct pvr_transfer_frag_store frag_store;
- struct pvr_bo *usc_eot_bos[PVR_TRANSFER_MAX_RENDER_TARGETS];
+ struct pvr_suballoc_bo *usc_eot_bos[PVR_TRANSFER_MAX_RENDER_TARGETS];
struct pvr_pds_upload pds_unitex_code[PVR_TRANSFER_MAX_TEXSTATE_DMA]
[PVR_TRANSFER_MAX_UNIFORM_DMA];
assert(rt_count <= ARRAY_SIZE(ctx->usc_eot_bos));
assert(rt_count > 0U);
- addr.addr = ctx->usc_eot_bos[rt_count - 1U]->vma->dev_addr.addr -
+ addr.addr = ctx->usc_eot_bos[rt_count - 1U]->dev_addr.addr -
device->heaps.usc_heap->base_addr.addr;
pvr_pds_setup_doutu(&program.task_control,
return VK_SUCCESS;
err_free_pds_frag_prog:
- pvr_bo_free(device, load_op->pds_frag_prog.pvr_bo);
+ pvr_bo_suballoc_free(load_op->pds_frag_prog.pvr_bo);
err_free_usc_frag_prog_bo:
- pvr_bo_free(device, load_op->usc_frag_prog_bo);
+ pvr_bo_suballoc_free(load_op->usc_frag_prog_bo);
return result;
}
const VkAllocationCallbacks *allocator,
struct pvr_load_op *load_op)
{
- pvr_bo_free(device, load_op->pds_tex_state_prog.pvr_bo);
- pvr_bo_free(device, load_op->pds_frag_prog.pvr_bo);
- pvr_bo_free(device, load_op->usc_frag_prog_bo);
+ pvr_bo_suballoc_free(load_op->pds_tex_state_prog.pvr_bo);
+ pvr_bo_suballoc_free(load_op->pds_frag_prog.pvr_bo);
+ pvr_bo_suballoc_free(load_op->usc_frag_prog_bo);
vk_free2(&device->vk.alloc, allocator, load_op);
}
VkResult pvr_pds_fragment_program_create_and_upload(
struct pvr_device *device,
const VkAllocationCallbacks *allocator,
- const struct pvr_bo *fragment_shader_bo,
+ const struct pvr_suballoc_bo *fragment_shader_bo,
uint32_t fragment_temp_count,
enum rogue_msaa_mode msaa_mode,
bool has_phase_rate_change,
* allocating the buffer. The size from pvr_pds_kick_usc() is constant.
*/
pvr_pds_setup_doutu(&program.usc_task_control,
- fragment_shader_bo->vma->dev_addr.addr,
+ fragment_shader_bo->dev_addr.addr,
fragment_temp_count,
sample_rate,
has_phase_rate_change);
const struct VkAllocationCallbacks *const allocator,
struct pvr_pds_attrib_program *const program)
{
- pvr_bo_free(device, program->program.pvr_bo);
+ pvr_bo_suballoc_free(program->program.pvr_bo);
vk_free2(&device->vk.alloc, allocator, program->info.entries);
}
const struct rogue_ubo_data *ubo_data,
pvr_pds_descriptor_program_buffer_array_ptr buffers_out_ptr,
uint32_t *const buffer_count_out,
- struct pvr_bo **const static_consts_pvr_bo_out)
+ struct pvr_suballoc_bo **const static_consts_pvr_bo_out)
{
struct pvr_pds_buffer *const buffers = *buffers_out_ptr;
uint32_t buffer_count = 0;
8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!entries_buffer) {
- pvr_bo_free(device, descriptor_state->static_consts);
+ pvr_bo_suballoc_free(descriptor_state->static_consts);
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
}
8,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (!staging_buffer) {
- pvr_bo_free(device, descriptor_state->static_consts);
+ pvr_bo_suballoc_free(descriptor_state->static_consts);
vk_free2(&device->vk.alloc, allocator, entries_buffer);
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!entries_buffer) {
- pvr_bo_free(device, descriptor_state->static_consts);
+ pvr_bo_suballoc_free(descriptor_state->static_consts);
vk_free2(&device->vk.alloc, allocator, staging_buffer);
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
16,
&descriptor_state->pds_code);
if (result != VK_SUCCESS) {
- pvr_bo_free(device, descriptor_state->static_consts);
+ pvr_bo_suballoc_free(descriptor_state->static_consts);
vk_free2(&device->vk.alloc, allocator, entries_buffer);
vk_free2(&device->vk.alloc, allocator, staging_buffer);
if (!descriptor_state)
return;
- pvr_bo_free(device, descriptor_state->pds_code.pvr_bo);
+ pvr_bo_suballoc_free(descriptor_state->pds_code.pvr_bo);
vk_free2(&device->vk.alloc, allocator, descriptor_state->pds_info.entries);
- pvr_bo_free(device, descriptor_state->static_consts);
+ pvr_bo_suballoc_free(descriptor_state->static_consts);
}
static void pvr_pds_compute_program_setup(
struct pvr_pds_info *const pds_info)
{
/* We don't allocate an entries buffer so we don't need to free it */
- pvr_bo_free(device, pds_program->pvr_bo);
+ pvr_bo_suballoc_free(pds_program->pvr_bo);
}
/* This only uploads the code segment. The data segment will need to be patched
const VkAllocationCallbacks *const allocator,
struct pvr_pds_base_workgroup_program *const state)
{
- pvr_bo_free(device, state->code_upload.pvr_bo);
+ pvr_bo_suballoc_free(state->code_upload.pvr_bo);
vk_free2(&device->vk.alloc, allocator, state->data_section);
}
work_group_input_regs,
barrier_coefficient,
usc_temps,
- compute_pipeline->shader_state.bo->vma->dev_addr,
+ compute_pipeline->shader_state.bo->dev_addr,
&compute_pipeline->primary_program,
&compute_pipeline->primary_program_info);
if (result != VK_SUCCESS)
work_group_input_regs,
barrier_coefficient,
usc_temps,
- compute_pipeline->shader_state.bo->vma->dev_addr,
+ compute_pipeline->shader_state.bo->dev_addr,
&compute_pipeline->primary_base_workgroup_variant_program);
if (result != VK_SUCCESS)
goto err_destroy_compute_program;
&compute_pipeline->primary_program_info);
err_free_descriptor_program:
- pvr_bo_free(device, compute_pipeline->descriptor_state.pds_code.pvr_bo);
+ pvr_bo_suballoc_free(compute_pipeline->descriptor_state.pds_code.pvr_bo);
err_free_shader:
- pvr_bo_free(device, compute_pipeline->shader_state.bo);
+ pvr_bo_suballoc_free(compute_pipeline->shader_state.bo);
return result;
}
pvr_pds_descriptor_program_destroy(device,
allocator,
&compute_pipeline->descriptor_state);
- pvr_bo_free(device, compute_pipeline->shader_state.bo);
+ pvr_bo_suballoc_free(compute_pipeline->shader_state.bo);
pvr_pipeline_finish(&compute_pipeline->base);
pvr_pds_vertex_attrib_program_destroy(device, allocator, attrib_program);
}
- pvr_bo_free(device,
- gfx_pipeline->shader_state.fragment.pds_fragment_program.pvr_bo);
- pvr_bo_free(device,
- gfx_pipeline->shader_state.fragment.pds_coeff_program.pvr_bo);
+ pvr_bo_suballoc_free(
+ gfx_pipeline->shader_state.fragment.pds_fragment_program.pvr_bo);
+ pvr_bo_suballoc_free(
+ gfx_pipeline->shader_state.fragment.pds_coeff_program.pvr_bo);
- pvr_bo_free(device, gfx_pipeline->shader_state.fragment.bo);
- pvr_bo_free(device, gfx_pipeline->shader_state.vertex.bo);
+ pvr_bo_suballoc_free(gfx_pipeline->shader_state.fragment.bo);
+ pvr_bo_suballoc_free(gfx_pipeline->shader_state.vertex.bo);
pvr_pipeline_finish(&gfx_pipeline->base);
allocator,
&gfx_pipeline->shader_state.vertex.descriptor_state);
err_free_frag_program:
- pvr_bo_free(device,
- gfx_pipeline->shader_state.fragment.pds_fragment_program.pvr_bo);
+ pvr_bo_suballoc_free(
+ gfx_pipeline->shader_state.fragment.pds_fragment_program.pvr_bo);
err_free_coeff_program:
- pvr_bo_free(device,
- gfx_pipeline->shader_state.fragment.pds_coeff_program.pvr_bo);
+ pvr_bo_suballoc_free(
+ gfx_pipeline->shader_state.fragment.pds_coeff_program.pvr_bo);
err_free_fragment_bo:
- pvr_bo_free(device, gfx_pipeline->shader_state.fragment.bo);
+ pvr_bo_suballoc_free(gfx_pipeline->shader_state.fragment.bo);
err_free_vertex_bo:
- pvr_bo_free(device, gfx_pipeline->shader_state.vertex.bo);
+ pvr_bo_suballoc_free(gfx_pipeline->shader_state.vertex.bo);
err_free_build_context:
ralloc_free(ctx);
return result;
};
struct pvr_pds_upload {
- struct pvr_bo *pvr_bo;
+ struct pvr_suballoc_bo *pvr_bo;
/* Offset from the pds heap base address. */
uint32_t data_offset;
/* Offset from the pds heap base address. */
};
struct pvr_compute_query_shader {
- struct pvr_bo *usc_bo;
+ struct pvr_suballoc_bo *usc_bo;
struct pvr_pds_upload pds_prim_code;
uint32_t primary_data_size_dw;
struct pvr_compute_query_shader *copy_results_shaders;
struct pvr_compute_query_shader *reset_queries_shaders;
+ struct pvr_suballocator suballoc_general;
+ struct pvr_suballocator suballoc_pds;
+ struct pvr_suballocator suballoc_usc;
+
struct {
struct pvr_pds_upload pds;
- struct pvr_bo *usc;
+ struct pvr_suballoc_bo *usc;
} nop_program;
/* Issue Data Fence, Wait for Data Fence state. */
struct {
uint32_t usc_shareds;
- struct pvr_bo *usc;
+ struct pvr_suballoc_bo *usc;
/* Buffer in which the IDF/WDF program performs store ops. */
struct pvr_bo *store_bo;
} idfwdf_state;
struct pvr_device_static_clear_state {
- struct pvr_bo *usc_vertex_shader_bo;
- struct pvr_bo *vertices_bo;
+ struct pvr_suballoc_bo *usc_vertex_shader_bo;
+ struct pvr_suballoc_bo *vertices_bo;
struct pvr_pds_upload pds;
- struct pvr_bo *usc_multi_layer_vertex_shader_bo;
+ /* Only valid if PVR_HAS_FEATURE(dev_info, gs_rta_support). */
+ struct pvr_suballoc_bo *usc_multi_layer_vertex_shader_bo;
struct pvr_static_clear_ppp_base ppp_base;
/* Indexable using VkImageAspectFlags. */
struct pvr_render_job job;
- struct pvr_bo *depth_bias_bo;
- struct pvr_bo *scissor_bo;
+ struct pvr_suballoc_bo *depth_bias_bo;
+ struct pvr_suballoc_bo *scissor_bo;
/* Tracking how the loaded depth/stencil values are being used. */
enum pvr_depth_stencil_usage depth_usage;
struct pvr_pds_info pds_info;
/* Already setup compile time static consts. */
- struct pvr_bo *static_consts;
+ struct pvr_suballoc_bo *static_consts;
};
struct pvr_pds_attrib_program {
struct pvr_compute_shader_state {
/* Pointer to a buffer object that contains the shader binary. */
- struct pvr_bo *bo;
+ struct pvr_suballoc_bo *bo;
bool uses_atomic_ops;
bool uses_barrier;
struct pvr_vertex_shader_state {
/* Pointer to a buffer object that contains the shader binary. */
- struct pvr_bo *bo;
+ struct pvr_suballoc_bo *bo;
uint32_t entry_offset;
/* 2 since we only need STATE_VARYING{0,1} state words. */
struct pvr_fragment_shader_state {
/* Pointer to a buffer object that contains the shader binary. */
- struct pvr_bo *bo;
+ struct pvr_suballoc_bo *bo;
uint32_t entry_offset;
struct pvr_pipeline_stage_state stage_state;
union {
struct {
uint32_t num_query_indices;
- struct pvr_bo *index_bo;
+ struct pvr_suballoc_bo *index_bo;
uint32_t num_queries;
struct pvr_bo *availability_bo;
} availability_write;
struct pvr_image_view **attachments;
/* Derived and other state. */
- struct pvr_bo *ppp_state_bo;
+ struct pvr_suballoc_bo *ppp_state_bo;
/* PPP state size in dwords. */
size_t ppp_state_size;
struct pvr_load_op {
bool is_hw_object;
- struct pvr_bo *usc_frag_prog_bo;
+ struct pvr_suballoc_bo *usc_frag_prog_bo;
uint32_t const_shareds_count;
uint32_t shareds_dest_offset;
uint32_t shareds_count;
struct pvr_winsys_vma **const vma_out,
pvr_dev_addr_t *const dev_addr_out);
void pvr_unbind_memory(struct pvr_device *device, struct pvr_winsys_vma *vma);
-
VkResult pvr_gpu_upload(struct pvr_device *device,
struct pvr_winsys_heap *heap,
const void *data,
size_t size,
uint64_t alignment,
- struct pvr_bo **const pvr_bo_out);
+ struct pvr_suballoc_bo **const pvr_bo_out);
VkResult pvr_gpu_upload_pds(struct pvr_device *device,
const uint32_t *data,
uint32_t data_size_dwords,
uint32_t code_alignment,
uint64_t min_alignment,
struct pvr_pds_upload *const pds_upload_out);
-
VkResult pvr_gpu_upload_usc(struct pvr_device *device,
const void *code,
size_t code_size,
uint64_t code_alignment,
- struct pvr_bo **const pvr_bo_out);
+ struct pvr_suballoc_bo **const pvr_bo_out);
VkResult pvr_cmd_buffer_add_transfer_cmd(struct pvr_cmd_buffer *cmd_buffer,
struct pvr_transfer_cmd *transfer_cmd);
VkResult pvr_pds_fragment_program_create_and_upload(
struct pvr_device *device,
const VkAllocationCallbacks *allocator,
- const struct pvr_bo *fragment_shader_bo,
+ const struct pvr_suballoc_bo *fragment_shader_bo,
uint32_t fragment_temp_count,
enum rogue_msaa_mode msaa_mode,
bool has_phase_rate_change,
uint32_t capacity,
uint32_t size_in_bytes);
-VkResult pvr_cmd_buffer_upload_general(struct pvr_cmd_buffer *const cmd_buffer,
- const void *const data,
- const size_t size,
- struct pvr_bo **const pvr_bo_out);
+VkResult
+pvr_cmd_buffer_upload_general(struct pvr_cmd_buffer *const cmd_buffer,
+ const void *const data,
+ const size_t size,
+ struct pvr_suballoc_bo **const pvr_bo_out);
VkResult pvr_cmd_buffer_upload_pds(struct pvr_cmd_buffer *const cmd_buffer,
const uint32_t *data,
uint32_t data_size_dwords,
pvr_destroy_compute_secondary_prog(struct pvr_device *device,
struct pvr_compute_query_shader *program)
{
- pvr_bo_free(device, program->pds_sec_code.pvr_bo);
+ pvr_bo_suballoc_free(program->pds_sec_code.pvr_bo);
vk_free(&device->vk.alloc, program->info.entries);
}
pvr_init_primary_compute_pds_program(&pds_primary_prog);
pvr_pds_setup_doutu(&pds_primary_prog.usc_task_control,
- query_prog->usc_bo->vma->dev_addr.addr,
+ query_prog->usc_bo->dev_addr.addr,
shader_factory_info->temps_required,
PVRX(PDSINST_DOUTU_SAMPLE_RATE_INSTANCE),
false);
return VK_SUCCESS;
err_free_pds_prim_code_bo:
- pvr_bo_free(device, query_prog->pds_prim_code.pvr_bo);
+ pvr_bo_suballoc_free(query_prog->pds_prim_code.pvr_bo);
err_free_usc_bo:
- pvr_bo_free(device, query_prog->usc_bo);
+ pvr_bo_suballoc_free(query_prog->usc_bo);
return result;
}
const struct pvr_const_map_entry_doutu_address *const doutu_addr =
(struct pvr_const_map_entry_doutu_address *)entries;
const pvr_dev_addr_t exec_addr =
- PVR_DEV_ADDR_OFFSET(query_prog->pds_sec_code.pvr_bo->vma->dev_addr,
+ PVR_DEV_ADDR_OFFSET(query_prog->pds_sec_code.pvr_bo->dev_addr,
query_prog->pds_sec_code.code_offset);
uint64_t addr = 0ULL;
struct pvr_compute_query_shader *program)
{
pvr_destroy_compute_secondary_prog(device, program);
- pvr_bo_free(device, program->pds_prim_code.pvr_bo);
- pvr_bo_free(device, program->usc_bo);
+ pvr_bo_suballoc_free(program->pds_prim_code.pvr_bo);
+ pvr_bo_suballoc_free(program->usc_bo);
}
static VkResult pvr_create_multibuffer_compute_query_program(
struct pvr_texture_state_info tex_info;
uint32_t num_query_indices;
uint32_t *const_buffer;
- struct pvr_bo *pvr_bo;
+ struct pvr_suballoc_bo *pvr_bo;
VkResult result;
pvr_csb_pack (&sampler_state[0U], TEXSTATE_SAMPLER, reg) {
pvr_init_tex_info(dev_info,
&tex_info,
num_query_indices,
- query_info->availability_write.index_bo->vma->dev_addr);
+ query_info->availability_write.index_bo->dev_addr);
result = pvr_pack_tex_state(device,
&tex_info,
return result;
}
- pipeline.const_buffer_addr = pvr_bo->vma->dev_addr;
+ pipeline.const_buffer_addr = pvr_bo->dev_addr;
vk_free(&cmd_buffer->vk.pool->alloc, const_buffer);
*/
static VkResult pvr_pds_pixel_event_program_create_and_upload(
struct pvr_device *device,
- const struct pvr_bo *usc_eot_program,
+ const struct pvr_suballoc_bo *usc_eot_program,
uint32_t usc_temp_count,
struct pvr_pds_upload *const pds_upload_out)
{
VkResult result;
pvr_pds_setup_doutu(&program.task_control,
- usc_eot_program->vma->dev_addr.addr,
+ usc_eot_program->dev_addr.addr,
usc_temp_count,
PVRX(PDSINST_DOUTU_SAMPLE_RATE_INSTANCE),
false);
usc_temp_count,
&pds_eot_program);
if (result != VK_SUCCESS) {
- pvr_bo_free(device, spm_eot_state->usc_eot_program);
+ pvr_bo_suballoc_free(spm_eot_state->usc_eot_program);
return result;
}
void pvr_spm_finish_eot_state(struct pvr_device *device,
struct pvr_spm_eot_state *spm_eot_state)
{
- pvr_bo_free(device, spm_eot_state->pixel_event_program_data_upload);
- pvr_bo_free(device, spm_eot_state->usc_eot_program);
+ pvr_bo_suballoc_free(spm_eot_state->pixel_event_program_data_upload);
+ pvr_bo_suballoc_free(spm_eot_state->usc_eot_program);
}
static VkFormat pvr_get_format_from_dword_count(uint32_t dword_count)
void pvr_spm_finish_bgobj_state(struct pvr_device *device,
struct pvr_spm_bgobj_state *spm_bgobj_state)
{
- pvr_bo_free(device, spm_bgobj_state->pds_texture_data_upload);
+ pvr_bo_suballoc_free(spm_bgobj_state->pds_texture_data_upload);
pvr_bo_free(device, spm_bgobj_state->consts_buffer);
}
uint64_t pbe_reg_words[PVR_MAX_COLOR_ATTACHMENTS]
[ROGUE_NUM_PBESTATE_REG_WORDS];
- struct pvr_bo *usc_eot_program;
+ struct pvr_suballoc_bo *usc_eot_program;
/* TODO: Make this struct pvr_pds_upload? It would pull in pvr_private.h
* though which causes a cycle since that includes pvr_spm.h .
* creation.
*/
uint64_t pixel_event_program_data_offset;
- struct pvr_bo *pixel_event_program_data_upload;
+ struct pvr_suballoc_bo *pixel_event_program_data_upload;
};
struct pvr_spm_bgobj_state {
/* TODO: Make this struct pvr_pds_upload? It would pull in pvr_private.h
* though which causes a cycle since that includes pvr_spm.h .
*/
- struct pvr_bo *pds_texture_data_upload;
+ struct pvr_suballoc_bo *pds_texture_data_upload;
uint64_t pds_reg_values[ROGUE_NUM_CR_PDS_BGRND_WORDS];
};
pvr_dev_addr_t kick_usc_pds_offset;
struct pvr_bo *kick_usc_pds_upload;
- struct pvr_bo *usc_upload;
+ struct pvr_suballoc_bo *usc_upload;
struct pvr_tq_frag_sh_reg_layout sh_reg_layout;
};
if (result != VK_SUCCESS)
goto err_free_entry;
- dev_addr = entry_data->usc_upload->vma->dev_addr;
+ dev_addr = entry_data->usc_upload->dev_addr;
dev_addr.addr -= device->heaps.usc_heap->base_addr.addr;
pvr_pds_setup_doutu(&kick_usc_pds_prog.usc_task_control,
return VK_SUCCESS;
err_free_usc_upload:
- pvr_bo_free(device, entry_data->usc_upload);
+ pvr_bo_suballoc_free(entry_data->usc_upload);
err_free_entry:
ralloc_free(entry_data);
const struct pvr_transfer_frag_store_entry_data *entry_data)
{
pvr_bo_free(device, entry_data->kick_usc_pds_upload);
- pvr_bo_free(device, entry_data->usc_upload);
+ pvr_bo_suballoc_free(entry_data->usc_upload);
}
static void inline pvr_transfer_frag_store_entry_data_destroy(