static VkResult pvr_setup_descriptor_mappings(
struct pvr_cmd_buffer *const cmd_buffer,
enum pvr_stage_allocation stage,
- const struct pvr_stage_allocation_uniform_state *uniform_state,
+ const struct pvr_stage_allocation_descriptor_state *descriptor_state,
UNUSED const pvr_dev_addr_t *const num_worgroups_buff_addr,
- uint32_t *const uniform_data_offset_out)
+ uint32_t *const descriptor_data_offset_out)
{
- const struct pvr_pds_info *const pds_info = &uniform_state->pds_info;
+ const struct pvr_pds_info *const pds_info = &descriptor_state->pds_info;
const struct pvr_descriptor_state *desc_state;
const uint8_t *entries;
uint32_t *dword_buffer;
/* TODO: See if instead of reusing the blend constant buffer type entry,
* we can setup a new buffer type specifically for num_workgroups or other
* built-in variables. The mappings are setup at pipeline creation when
- * creating the uniform program.
+ * creating the descriptor program.
*/
pvr_finishme("Handle blend constant reuse for compute.");
pvr_bo_cpu_unmap(cmd_buffer->device, pvr_bo);
- *uniform_data_offset_out =
+ *descriptor_data_offset_out =
pvr_bo->vma->dev_addr.addr -
cmd_buffer->device->heaps.pds_heap->base_addr.addr;
* allocation of the local/common store shared registers so we repurpose the
* deallocation PDS program.
*/
- if (pipeline->state.uniform.pds_info.code_size_in_dwords) {
+ if (pipeline->state.descriptor.pds_info.code_size_in_dwords) {
uint32_t pds_data_size_in_dwords =
- pipeline->state.uniform.pds_info.data_size_in_dwords;
+ pipeline->state.descriptor.pds_info.data_size_in_dwords;
- info.pds_data_offset = state->pds_compute_uniform_data_offset;
+ info.pds_data_offset = state->pds_compute_descriptor_data_offset;
info.pds_data_size =
DIV_ROUND_UP(pds_data_size_in_dwords << 2U,
PVRX(CDMCTRL_KERNEL0_PDS_DATA_SIZE_UNIT_SIZE));
/* Check that we have upload the code section. */
- assert(pipeline->state.uniform.pds_code.code_size);
- info.pds_code_offset = pipeline->state.uniform.pds_code.code_offset;
+ assert(pipeline->state.descriptor.pds_code.code_size);
+ info.pds_code_offset = pipeline->state.descriptor.pds_code.code_offset;
} else {
/* FIXME: There should be a deallocation pds program already uploaded
* that we use at this point.
if (result != VK_SUCCESS)
return;
- result =
- pvr_setup_descriptor_mappings(cmd_buffer,
- PVR_STAGE_ALLOCATION_COMPUTE,
- &compute_pipeline->state.uniform,
- &num_workgroups_bo->vma->dev_addr,
- &state->pds_compute_uniform_data_offset);
+ result = pvr_setup_descriptor_mappings(
+ cmd_buffer,
+ PVR_STAGE_ALLOCATION_COMPUTE,
+ &compute_pipeline->state.descriptor,
+ &num_workgroups_bo->vma->dev_addr,
+ &state->pds_compute_descriptor_data_offset);
if (result != VK_SUCCESS)
return;
} else if ((compute_pipeline->base.layout
->per_stage_descriptor_masks[PVR_STAGE_ALLOCATION_COMPUTE] &&
state->dirty.compute_desc_dirty) ||
state->dirty.compute_pipeline_binding || push_descriptors_dirty) {
- result =
- pvr_setup_descriptor_mappings(cmd_buffer,
- PVR_STAGE_ALLOCATION_COMPUTE,
- &compute_pipeline->state.uniform,
- NULL,
- &state->pds_compute_uniform_data_offset);
+ result = pvr_setup_descriptor_mappings(
+ cmd_buffer,
+ PVR_STAGE_ALLOCATION_COMPUTE,
+ &compute_pipeline->state.descriptor,
+ NULL,
+ &state->pds_compute_descriptor_data_offset);
if (result != VK_SUCCESS)
return;
}
static void
pvr_emit_dirty_pds_state(const struct pvr_cmd_buffer *const cmd_buffer,
struct pvr_sub_cmd_gfx *const sub_cmd,
- const uint32_t pds_vertex_uniform_data_offset)
+ const uint32_t pds_vertex_descriptor_data_offset)
{
const struct pvr_cmd_buffer_state *const state = &cmd_buffer->state;
- const struct pvr_stage_allocation_uniform_state *const vertex_uniform_state =
- &state->gfx_pipeline->vertex_shader_state.uniform_state;
+ const struct pvr_stage_allocation_descriptor_state
+ *const vertex_descriptor_state =
+ &state->gfx_pipeline->vertex_shader_state.descriptor_state;
const struct pvr_pipeline_stage_state *const vertex_stage_state =
&state->gfx_pipeline->vertex_shader_state.stage_state;
struct pvr_csb *const csb = &sub_cmd->control_stream;
- if (!vertex_uniform_state->pds_info.code_size_in_dwords)
+ if (!vertex_descriptor_state->pds_info.code_size_in_dwords)
return;
pvr_csb_emit (csb, VDMCTRL_PDS_STATE0, state0) {
DIV_ROUND_UP(vertex_stage_state->const_shared_reg_count << 2,
PVRX(VDMCTRL_PDS_STATE0_USC_COMMON_SIZE_UNIT_SIZE));
- state0.pds_data_size =
- DIV_ROUND_UP(vertex_uniform_state->pds_info.data_size_in_dwords << 2,
- PVRX(VDMCTRL_PDS_STATE0_PDS_DATA_SIZE_UNIT_SIZE));
+ state0.pds_data_size = DIV_ROUND_UP(
+ vertex_descriptor_state->pds_info.data_size_in_dwords << 2,
+ PVRX(VDMCTRL_PDS_STATE0_PDS_DATA_SIZE_UNIT_SIZE));
}
pvr_csb_emit (csb, VDMCTRL_PDS_STATE1, state1) {
- state1.pds_data_addr = PVR_DEV_ADDR(pds_vertex_uniform_data_offset);
+ state1.pds_data_addr = PVR_DEV_ADDR(pds_vertex_descriptor_data_offset);
state1.sd_type = PVRX(VDMCTRL_SD_TYPE_NONE);
}
pvr_csb_emit (csb, VDMCTRL_PDS_STATE2, state2) {
state2.pds_code_addr =
- PVR_DEV_ADDR(vertex_uniform_state->pds_code.code_offset);
+ PVR_DEV_ADDR(vertex_descriptor_state->pds_code.code_offset);
}
}
struct pvr_sub_cmd_gfx *const sub_cmd)
{
struct pvr_cmd_buffer_state *const state = &cmd_buffer->state;
- const struct pvr_stage_allocation_uniform_state *uniform_shader_state =
- &state->gfx_pipeline->fragment_shader_state.uniform_state;
+ const struct pvr_stage_allocation_descriptor_state *descriptor_shader_state =
+ &state->gfx_pipeline->fragment_shader_state.descriptor_state;
const struct pvr_pds_upload *pds_coeff_program =
&state->gfx_pipeline->fragment_shader_state.pds_coeff_program;
const struct pvr_pipeline_stage_state *fragment_state =
struct pvr_ppp_state *const ppp_state = &state->ppp_state;
const uint32_t pds_uniform_size =
- DIV_ROUND_UP(uniform_shader_state->pds_info.data_size_in_dwords,
+ DIV_ROUND_UP(descriptor_shader_state->pds_info.data_size_in_dwords,
PVRX(TA_STATE_PDS_SIZEINFO1_PDS_UNIFORMSIZE_UNIT_SIZE));
const uint32_t pds_varying_state_size =
shader_base.addr = PVR_DEV_ADDR(pds_upload->data_offset);
}
- if (uniform_shader_state->pds_code.pvr_bo) {
+ if (descriptor_shader_state->pds_code.pvr_bo) {
pvr_csb_pack (&ppp_state->pds.texture_uniform_code_base,
TA_STATE_PDS_TEXUNICODEBASE,
tex_base) {
tex_base.addr =
- PVR_DEV_ADDR(uniform_shader_state->pds_code.code_offset);
+ PVR_DEV_ADDR(descriptor_shader_state->pds_code.code_offset);
}
} else {
ppp_state->pds.texture_uniform_code_base = 0U;
pvr_csb_pack (&ppp_state->pds.uniform_state_data_base,
TA_STATE_PDS_UNIFORMDATABASE,
base) {
- base.addr = PVR_DEV_ADDR(state->pds_fragment_uniform_data_offset);
+ base.addr = PVR_DEV_ADDR(state->pds_fragment_descriptor_data_offset);
}
emit_state->pds_fragment_stateptr0 = true;
result = pvr_setup_descriptor_mappings(
cmd_buffer,
PVR_STAGE_ALLOCATION_FRAGMENT,
- &state->gfx_pipeline->fragment_shader_state.uniform_state,
+ &state->gfx_pipeline->fragment_shader_state.descriptor_state,
NULL,
- &state->pds_fragment_uniform_data_offset);
+ &state->pds_fragment_descriptor_data_offset);
if (result != VK_SUCCESS) {
mesa_loge("Could not setup fragment descriptor mappings.");
return result;
}
if (state->dirty.vertex_descriptors) {
- uint32_t pds_vertex_uniform_data_offset;
+ uint32_t pds_vertex_descriptor_data_offset;
result = pvr_setup_descriptor_mappings(
cmd_buffer,
PVR_STAGE_ALLOCATION_VERTEX_GEOMETRY,
- &state->gfx_pipeline->vertex_shader_state.uniform_state,
+ &state->gfx_pipeline->vertex_shader_state.descriptor_state,
NULL,
- &pds_vertex_uniform_data_offset);
+ &pds_vertex_descriptor_data_offset);
if (result != VK_SUCCESS) {
mesa_loge("Could not setup vertex descriptor mappings.");
return result;
pvr_emit_dirty_pds_state(cmd_buffer,
sub_cmd,
- pds_vertex_uniform_data_offset);
+ pds_vertex_descriptor_data_offset);
}
pvr_emit_dirty_ppp_state(cmd_buffer, sub_cmd);
* structs.
*/
typedef struct pvr_pds_buffer (
- *const pvr_pds_uniform_program_buffer_array_ptr)[PVR_PDS_MAX_BUFFERS];
+ *const pvr_pds_descriptor_program_buffer_array_ptr)[PVR_PDS_MAX_BUFFERS];
-static void pvr_pds_uniform_program_setup_buffers(
+static void pvr_pds_descriptor_program_setup_buffers(
bool robust_buffer_access,
const struct rogue_ubo_data *ubo_data,
- pvr_pds_uniform_program_buffer_array_ptr buffers_out_ptr,
+ pvr_pds_descriptor_program_buffer_array_ptr buffers_out_ptr,
uint32_t *const buffer_count_out)
{
struct pvr_pds_buffer *const buffers = *buffers_out_ptr;
*buffer_count_out = buffer_count;
}
-static VkResult pvr_pds_uniform_program_create_and_upload(
+static VkResult pvr_pds_descriptor_program_create_and_upload(
struct pvr_device *const device,
const VkAllocationCallbacks *const allocator,
const struct rogue_ubo_data *const ubo_data,
memset(pds_info_out, 0, sizeof(*pds_info_out));
- pvr_pds_uniform_program_setup_buffers(device->features.robustBufferAccess,
- ubo_data,
- &program.buffers,
- &program.buffer_count);
+ pvr_pds_descriptor_program_setup_buffers(device->features.robustBufferAccess,
+ ubo_data,
+ &program.buffers,
+ &program.buffer_count);
for (uint32_t dma = 0; dma < program.buffer_count; dma++) {
if (program.buffers[dma].type != PVR_BUFFER_TYPES_COMPILE_TIME)
return VK_SUCCESS;
}
-static void pvr_pds_uniform_program_destroy(
+static void pvr_pds_descriptor_program_destroy(
struct pvr_device *const device,
const struct VkAllocationCallbacks *const allocator,
struct pvr_pds_upload *const pds_code,
abort();
};
- result = pvr_pds_uniform_program_create_and_upload(
+ result = pvr_pds_descriptor_program_create_and_upload(
device,
allocator,
&ubo_data,
&explicit_const_usage,
compute_pipeline->base.layout,
PVR_STAGE_ALLOCATION_COMPUTE,
- &compute_pipeline->state.uniform.pds_code,
- &compute_pipeline->state.uniform.pds_info);
+ &compute_pipeline->state.descriptor.pds_code,
+ &compute_pipeline->state.descriptor.pds_info);
if (result != VK_SUCCESS)
goto err_free_shader;
&compute_pipeline->state.primary_program,
&compute_pipeline->state.primary_program_info);
if (result != VK_SUCCESS)
- goto err_free_uniform_program;
+ goto err_free_descriptor_program;
/* If the workgroup ID is required, then we require the base workgroup
* variant of the PDS compute program as well.
&compute_pipeline->state.primary_program,
&compute_pipeline->state.primary_program_info);
-err_free_uniform_program:
- pvr_bo_free(device, compute_pipeline->state.uniform.pds_code.pvr_bo);
+err_free_descriptor_program:
+ pvr_bo_free(device, compute_pipeline->state.descriptor.pds_code.pvr_bo);
err_free_shader:
pvr_bo_free(device, compute_pipeline->state.shader.bo);
allocator,
&compute_pipeline->state.primary_program,
&compute_pipeline->state.primary_program_info);
- pvr_pds_uniform_program_destroy(device,
- allocator,
- &compute_pipeline->state.uniform.pds_code,
- &compute_pipeline->state.uniform.pds_info);
+ pvr_pds_descriptor_program_destroy(
+ device,
+ allocator,
+ &compute_pipeline->state.descriptor.pds_code,
+ &compute_pipeline->state.descriptor.pds_info);
pvr_bo_free(device, compute_pipeline->state.shader.bo);
pvr_pipeline_finish(&compute_pipeline->base);
const uint32_t num_vertex_attrib_programs =
ARRAY_SIZE(gfx_pipeline->vertex_shader_state.pds_attrib_programs);
- pvr_pds_uniform_program_destroy(
+ pvr_pds_descriptor_program_destroy(
device,
allocator,
- &gfx_pipeline->fragment_shader_state.uniform_state.pds_code,
- &gfx_pipeline->fragment_shader_state.uniform_state.pds_info);
+ &gfx_pipeline->fragment_shader_state.descriptor_state.pds_code,
+ &gfx_pipeline->fragment_shader_state.descriptor_state.pds_info);
- pvr_pds_uniform_program_destroy(
+ pvr_pds_descriptor_program_destroy(
device,
allocator,
- &gfx_pipeline->vertex_shader_state.uniform_state.pds_code,
- &gfx_pipeline->vertex_shader_state.uniform_state.pds_info);
+ &gfx_pipeline->vertex_shader_state.descriptor_state.pds_code,
+ &gfx_pipeline->vertex_shader_state.descriptor_state.pds_info);
for (uint32_t i = 0; i < num_vertex_attrib_programs; i++) {
struct pvr_pds_attrib_program *const attrib_program =
if (result != VK_SUCCESS)
goto err_free_frag_program;
- result = pvr_pds_uniform_program_create_and_upload(
+ result = pvr_pds_descriptor_program_create_and_upload(
device,
allocator,
&ctx->common_data[MESA_SHADER_VERTEX].ubo_data,
&vert_explicit_const_usage,
gfx_pipeline->base.layout,
PVR_STAGE_ALLOCATION_VERTEX_GEOMETRY,
- &gfx_pipeline->vertex_shader_state.uniform_state.pds_code,
- &gfx_pipeline->vertex_shader_state.uniform_state.pds_info);
+ &gfx_pipeline->vertex_shader_state.descriptor_state.pds_code,
+ &gfx_pipeline->vertex_shader_state.descriptor_state.pds_info);
if (result != VK_SUCCESS)
goto err_free_vertex_attrib_program;
* scratch buffer for both vertex and fragment stage.
* Figure out the best place to do this.
*/
- /* assert(pvr_pds_uniform_program_variables.temp_buff_total_size == 0); */
+ /* assert(pvr_pds_descriptor_program_variables.temp_buff_total_size == 0); */
/* TODO: Implement spilling with the above. */
- /* TODO: Call pvr_pds_uniform_program_create_and_upload in a loop. */
+ /* TODO: Call pvr_pds_descriptor_program_create_and_upload in a loop. */
/* FIXME: For now we pass in the same explicit_const_usage since it contains
* all invalid entries. Fix this by hooking it up to the compiler.
*/
- result = pvr_pds_uniform_program_create_and_upload(
+ result = pvr_pds_descriptor_program_create_and_upload(
device,
allocator,
&ctx->common_data[MESA_SHADER_FRAGMENT].ubo_data,
&frag_explicit_const_usage,
gfx_pipeline->base.layout,
PVR_STAGE_ALLOCATION_FRAGMENT,
- &gfx_pipeline->fragment_shader_state.uniform_state.pds_code,
- &gfx_pipeline->fragment_shader_state.uniform_state.pds_info);
+ &gfx_pipeline->fragment_shader_state.descriptor_state.pds_code,
+ &gfx_pipeline->fragment_shader_state.descriptor_state.pds_info);
if (result != VK_SUCCESS)
- goto err_free_vertex_uniform_program;
+ goto err_free_vertex_descriptor_program;
ralloc_free(ctx);
return VK_SUCCESS;
-err_free_vertex_uniform_program:
- pvr_pds_uniform_program_destroy(
+err_free_vertex_descriptor_program:
+ pvr_pds_descriptor_program_destroy(
device,
allocator,
- &gfx_pipeline->vertex_shader_state.uniform_state.pds_code,
- &gfx_pipeline->vertex_shader_state.uniform_state.pds_info);
+ &gfx_pipeline->vertex_shader_state.descriptor_state.pds_code,
+ &gfx_pipeline->vertex_shader_state.descriptor_state.pds_info);
err_free_vertex_attrib_program:
for (uint32_t i = 0;
i < ARRAY_SIZE(gfx_pipeline->vertex_shader_state.pds_attrib_programs);
/* Address of data segment for vertex attrib upload program. */
uint32_t pds_vertex_attrib_offset;
- uint32_t pds_fragment_uniform_data_offset;
- uint32_t pds_compute_uniform_data_offset;
+ uint32_t pds_fragment_descriptor_data_offset;
+ uint32_t pds_compute_descriptor_data_offset;
};
static_assert(
struct pvr_device *device;
};
-struct pvr_stage_allocation_uniform_state {
+struct pvr_stage_allocation_descriptor_state {
struct pvr_pds_upload pds_code;
/* Since we upload the code segment separately from the data segment
* pds_code->data_size might be 0 whilst
struct pvr_pipeline_stage_state stage_state;
/* FIXME: Move this into stage_state? */
- struct pvr_stage_allocation_uniform_state uniform_state;
+ struct pvr_stage_allocation_descriptor_state descriptor_state;
uint32_t vertex_input_size;
uint32_t vertex_output_size;
uint32_t user_clip_planes_mask;
struct pvr_pipeline_stage_state stage_state;
/* FIXME: Move this into stage_state? */
- struct pvr_stage_allocation_uniform_state uniform_state;
+ struct pvr_stage_allocation_descriptor_state descriptor_state;
uint32_t pass_type;
struct pvr_pds_upload pds_coeff_program;
uint32_t base_workgroup : 1;
} flags;
- struct pvr_stage_allocation_uniform_state uniform;
+ struct pvr_stage_allocation_descriptor_state descriptor;
struct pvr_pds_upload primary_program;
struct pvr_pds_info primary_program_info;