struct radv_userdata_info *loc =
&ps_shader->info.user_sgprs_locs.shader_data[AC_UD_PS_EPILOG_PC];
- uint32_t base_reg = pipeline->base.user_data_0[MESA_SHADER_FRAGMENT];
+ uint32_t base_reg = ps_shader->info.user_data_0;
assert(loc->sgpr_idx != -1);
assert(loc->num_sgprs == 1);
radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs, base_reg + loc->sgpr_idx * 4,
}
}
- base_reg = pipeline->base.user_data_0[stage];
+ base_reg = last_vgt_shader->info.user_data_0;
radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, provoking_vtx);
}
const struct radv_userdata_info *loc =
radv_get_user_sgpr(last_vgt_shader, AC_UD_NUM_VERTS_PER_PRIM);
const struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
- const unsigned stage = last_vgt_shader->info.stage;
uint32_t base_reg;
assert(!cmd_buffer->state.mesh_shading);
if (loc->sgpr_idx == -1)
return;
- base_reg = pipeline->base.user_data_0[stage];
+ base_reg = last_vgt_shader->info.user_data_0;
radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4,
si_conv_prim_to_gs_out(d->vk.ia.primitive_topology, pipeline->is_ngg) + 1);
}
radv_emit_patch_control_points(struct radv_cmd_buffer *cmd_buffer)
{
const struct radv_physical_device *pdevice = cmd_buffer->device->physical_device;
- const struct radv_graphics_pipeline *pipeline = cmd_buffer->state.graphics_pipeline;
const struct radv_shader *tcs = cmd_buffer->state.shaders[MESA_SHADER_TESS_CTRL];
const struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
unsigned ls_hs_config, base_reg;
return;
assert(offchip->num_sgprs == 1);
- base_reg = pipeline->base.user_data_0[MESA_SHADER_TESS_CTRL];
+ base_reg = tcs->info.user_data_0;
radeon_set_sh_reg(cmd_buffer->cs, base_reg + offchip->sgpr_idx * 4,
(cmd_buffer->state.tess_num_patches << 6) | d->vk.ts.patch_control_points);
radv_get_shader(cmd_buffer->state.shaders, MESA_SHADER_TESS_EVAL), AC_UD_TES_NUM_PATCHES);
assert(num_patches->sgpr_idx != -1 && num_patches->num_sgprs == 1);
- base_reg = pipeline->base.user_data_0[MESA_SHADER_TESS_EVAL];
+ const struct radv_shader *tes = radv_get_shader(cmd_buffer->state.shaders, MESA_SHADER_TESS_EVAL);
+ base_reg = tes->info.user_data_0;
radeon_set_sh_reg(cmd_buffer->cs, base_reg + num_patches->sgpr_idx * 4,
cmd_buffer->state.tess_num_patches);
}
const struct radv_userdata_info *loc =
radv_get_user_sgpr(cmd_buffer->state.shaders[MESA_SHADER_FRAGMENT], AC_UD_PS_NUM_SAMPLES);
if (loc->sgpr_idx != -1) {
- uint32_t base_reg = pipeline->base.user_data_0[MESA_SHADER_FRAGMENT];
+ uint32_t base_reg = cmd_buffer->state.shaders[MESA_SHADER_FRAGMENT]->info.user_data_0;
radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, rasterization_samples);
}
}
const struct radv_userdata_info *loc =
&vs_shader->info.user_sgprs_locs.shader_data[AC_UD_VS_PROLOG_INPUTS];
- uint32_t base_reg = cmd_buffer->state.graphics_pipeline->base.user_data_0[MESA_SHADER_VERTEX];
+ uint32_t base_reg = vs_shader->info.user_data_0;
assert(loc->sgpr_idx != -1);
assert(loc->num_sgprs == 2);
radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs, base_reg + loc->sgpr_idx * 4,
if (bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS) {
for (unsigned s = MESA_SHADER_VERTEX; s <= MESA_SHADER_FRAGMENT; s++)
if (radv_cmdbuf_has_stage(cmd_buffer, s))
- radv_emit_userdata_address(device, cs, cmd_buffer->state.shaders[s], pipeline->user_data_0[s],
+ radv_emit_userdata_address(device, cs, cmd_buffer->state.shaders[s],
+ cmd_buffer->state.shaders[s]->info.user_data_0,
AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
if (radv_cmdbuf_has_stage(cmd_buffer, MESA_SHADER_MESH))
radv_emit_userdata_address(device, cs, cmd_buffer->state.shaders[MESA_SHADER_MESH],
- pipeline->user_data_0[MESA_SHADER_MESH],
+ cmd_buffer->state.shaders[MESA_SHADER_MESH]->info.user_data_0,
AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
if (radv_cmdbuf_has_stage(cmd_buffer, MESA_SHADER_TASK))
radv_emit_userdata_address(device, cmd_buffer->ace_internal.cs,
cmd_buffer->state.shaders[MESA_SHADER_TASK],
- pipeline->user_data_0[MESA_SHADER_TASK],
+ cmd_buffer->state.shaders[MESA_SHADER_TASK]->info.user_data_0,
AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
} else {
radv_emit_userdata_address(device, cs, cmd_buffer->state.shaders[MESA_SHADER_COMPUTE],
- pipeline->user_data_0[MESA_SHADER_COMPUTE],
+ cmd_buffer->state.shaders[MESA_SHADER_COMPUTE]->info.user_data_0,
AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
}
}
if (stages & VK_SHADER_STAGE_COMPUTE_BIT) {
radv_emit_descriptor_pointers(device, cs, cmd_buffer->state.shaders[MESA_SHADER_COMPUTE],
- pipeline->user_data_0[MESA_SHADER_COMPUTE], descriptors_state);
+ cmd_buffer->state.shaders[MESA_SHADER_COMPUTE]->info.user_data_0,
+ descriptors_state);
} else {
radv_foreach_stage(stage, stages & ~VK_SHADER_STAGE_TASK_BIT_EXT)
{
continue;
radv_emit_descriptor_pointers(device, cs, cmd_buffer->state.shaders[stage],
- pipeline->user_data_0[stage], descriptors_state);
+ cmd_buffer->state.shaders[stage]->info.user_data_0,
+ descriptors_state);
}
if (stages & VK_SHADER_STAGE_TASK_BIT_EXT) {
radv_emit_descriptor_pointers(device, cmd_buffer->ace_internal.cs,
cmd_buffer->state.shaders[MESA_SHADER_TASK],
- pipeline->user_data_0[MESA_SHADER_TASK],
+ cmd_buffer->state.shaders[MESA_SHADER_TASK]->info.user_data_0,
descriptors_state);
}
}
if (internal_stages & VK_SHADER_STAGE_COMPUTE_BIT) {
radv_emit_all_inline_push_consts(device, cs, cmd_buffer->state.shaders[MESA_SHADER_COMPUTE],
- pipeline->user_data_0[MESA_SHADER_COMPUTE],
+ cmd_buffer->state.shaders[MESA_SHADER_COMPUTE]->info.user_data_0,
(uint32_t *)cmd_buffer->push_constants, &need_push_constants);
} else {
if (!shader)
continue;
- radv_emit_all_inline_push_consts(device, cs, shader, pipeline->user_data_0[stage],
+ radv_emit_all_inline_push_consts(device, cs, shader, shader->info.user_data_0,
(uint32_t *)cmd_buffer->push_constants,
&need_push_constants);
}
if (internal_stages & VK_SHADER_STAGE_TASK_BIT_EXT) {
radv_emit_all_inline_push_consts(device, cmd_buffer->ace_internal.cs,
cmd_buffer->state.shaders[MESA_SHADER_TASK],
- pipeline->user_data_0[MESA_SHADER_TASK],
+ cmd_buffer->state.shaders[MESA_SHADER_TASK]->info.user_data_0,
(uint32_t *)cmd_buffer->push_constants,
&need_push_constants);
}
if (internal_stages & VK_SHADER_STAGE_COMPUTE_BIT) {
radv_emit_userdata_address(device, cs, cmd_buffer->state.shaders[MESA_SHADER_COMPUTE],
- pipeline->user_data_0[MESA_SHADER_COMPUTE],
+ cmd_buffer->state.shaders[MESA_SHADER_COMPUTE]->info.user_data_0,
AC_UD_PUSH_CONSTANTS, va);
} else {
prev_shader = NULL;
/* Avoid redundantly emitting the address for merged stages. */
if (shader && shader != prev_shader) {
- radv_emit_userdata_address(device, cs, shader, pipeline->user_data_0[stage],
+ radv_emit_userdata_address(device, cs, shader, shader->info.user_data_0,
AC_UD_PUSH_CONSTANTS, va);
prev_shader = shader;
if (internal_stages & VK_SHADER_STAGE_TASK_BIT_EXT) {
radv_emit_userdata_address(device, cmd_buffer->ace_internal.cs,
cmd_buffer->state.shaders[MESA_SHADER_TASK],
- pipeline->user_data_0[MESA_SHADER_TASK],
+ cmd_buffer->state.shaders[MESA_SHADER_TASK]->info.user_data_0,
AC_UD_PUSH_CONSTANTS, va);
}
}
assert(!cmd_buffer->state.mesh_shading);
struct radv_graphics_pipeline *pipeline = cmd_buffer->state.graphics_pipeline;
+ struct radv_shader *vs = radv_get_shader(cmd_buffer->state.shaders, MESA_SHADER_VERTEX);
unsigned vb_offset;
void *vb_ptr;
uint64_t va;
va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
va += vb_offset;
- radv_emit_userdata_address(cmd_buffer->device, cmd_buffer->cs,
- radv_get_shader(cmd_buffer->state.shaders, MESA_SHADER_VERTEX),
- pipeline->base.user_data_0[MESA_SHADER_VERTEX],
+ radv_emit_userdata_address(cmd_buffer->device, cmd_buffer->cs, vs, vs->info.user_data_0,
AC_UD_VS_VERTEX_BUFFERS, va);
cmd_buffer->state.vb_va = va;
static void
radv_emit_streamout_buffers(struct radv_cmd_buffer *cmd_buffer, uint64_t va)
{
- struct radv_graphics_pipeline *pipeline = cmd_buffer->state.graphics_pipeline;
const struct radv_shader *last_vgt_shader = cmd_buffer->state.last_vgt_shader;
- const unsigned stage = last_vgt_shader->info.stage;
const struct radv_userdata_info *loc =
radv_get_user_sgpr(last_vgt_shader, AC_UD_STREAMOUT_BUFFERS);
uint32_t base_reg;
if (loc->sgpr_idx == -1)
return;
- base_reg = pipeline->base.user_data_0[stage];
+ base_reg = last_vgt_shader->info.user_data_0;
radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, va,
false);
{
struct radv_graphics_pipeline *pipeline = cmd_buffer->state.graphics_pipeline;
const struct radv_shader *last_vgt_shader = cmd_buffer->state.last_vgt_shader;
- const unsigned stage = last_vgt_shader->info.stage;
const struct radv_userdata_info *loc =
radv_get_user_sgpr(last_vgt_shader, AC_UD_NGG_QUERY_STATE);
enum radv_ngg_query_state ngg_query_state = radv_ngg_query_none;
ngg_query_state |= radv_ngg_query_prim_xfb | radv_ngg_query_prim_gen;
}
- base_reg = pipeline->base.user_data_0[stage];
+ base_reg = last_vgt_shader->info.user_data_0;
assert(loc->sgpr_idx != -1);
radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, ngg_query_state);
const struct radv_shader *last_vgt_shader = cmd_buffer->state.last_vgt_shader;
loc = radv_get_user_sgpr(last_vgt_shader, AC_UD_FORCE_VRS_RATES);
- base_reg = pipeline->base.user_data_0[last_vgt_shader->info.stage];
+ base_reg = last_vgt_shader->info.user_data_0;
}
assert(loc->sgpr_idx != -1);
static void
radv_emit_view_index(struct radv_cmd_buffer *cmd_buffer, unsigned index)
{
- struct radv_graphics_pipeline *pipeline = cmd_buffer->state.graphics_pipeline;
struct radeon_cmdbuf *cs = cmd_buffer->cs;
radv_foreach_stage(stage, cmd_buffer->state.active_stages & ~VK_SHADER_STAGE_TASK_BIT_EXT) {
- radv_emit_view_index_per_stage(cs, radv_get_shader(cmd_buffer->state.shaders, stage),
- pipeline->base.user_data_0[stage], index);
+ const struct radv_shader *shader = radv_get_shader(cmd_buffer->state.shaders, stage);
+
+ radv_emit_view_index_per_stage(cs, shader, shader->info.user_data_0, index);
}
if (cmd_buffer->state.gs_copy_shader) {
if (cmd_buffer->state.active_stages & VK_SHADER_STAGE_TASK_BIT_EXT) {
radv_emit_view_index_per_stage(cmd_buffer->ace_internal.cs,
cmd_buffer->state.shaders[MESA_SHADER_TASK],
- pipeline->base.user_data_0[MESA_SHADER_TASK], index);
+ cmd_buffer->state.shaders[MESA_SHADER_TASK]->info.user_data_0,
+ index);
}
}
static void
radv_emit_ngg_culling_state(struct radv_cmd_buffer *cmd_buffer)
{
- const struct radv_graphics_pipeline *pipeline = cmd_buffer->state.graphics_pipeline;
const struct radv_shader *last_vgt_shader = cmd_buffer->state.last_vgt_shader;
- const unsigned stage = last_vgt_shader->info.stage;
- const uint32_t base_reg = pipeline->base.user_data_0[stage];
+ const uint32_t base_reg = last_vgt_shader->info.user_data_0;
/* Get viewport transform. */
float vp_scale[2], vp_translate[2];
uint64_t indirect_va, enum radv_rt_mode mode)
{
struct radv_compute_pipeline *pipeline = &cmd_buffer->state.rt_pipeline->base;
- uint32_t base_reg = pipeline->base.user_data_0[MESA_SHADER_COMPUTE];
+ const struct radv_shader *compute_shader = cmd_buffer->state.shaders[MESA_SHADER_COMPUTE];
+ uint32_t base_reg = compute_shader->info.user_data_0;
/* Reserve scratch for stacks manually since it is not handled by the compute path. */
uint32_t scratch_bytes_per_wave = pipeline->base.scratch_bytes_per_wave;
- const struct radv_shader *compute_shader = cmd_buffer->state.shaders[MESA_SHADER_COMPUTE];
uint32_t wave_size = compute_shader->info.wave_size;
/* The hardware register is specified as a multiple of 256 DWORDS. */
return result;
}
-static uint32_t
-radv_pipeline_stage_to_user_data_0(struct radv_graphics_pipeline *pipeline, gl_shader_stage stage,
- enum amd_gfx_level gfx_level)
-{
- bool has_gs = radv_pipeline_has_stage(pipeline, MESA_SHADER_GEOMETRY);
- bool has_tess = radv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_CTRL);
- bool has_ngg = radv_pipeline_has_ngg(pipeline);
-
- switch (stage) {
- case MESA_SHADER_FRAGMENT:
- return R_00B030_SPI_SHADER_USER_DATA_PS_0;
- case MESA_SHADER_VERTEX:
- if (has_tess) {
- if (gfx_level >= GFX10) {
- return R_00B430_SPI_SHADER_USER_DATA_HS_0;
- } else if (gfx_level == GFX9) {
- return R_00B430_SPI_SHADER_USER_DATA_LS_0;
- } else {
- return R_00B530_SPI_SHADER_USER_DATA_LS_0;
- }
- }
-
- if (has_gs) {
- if (gfx_level >= GFX10) {
- return R_00B230_SPI_SHADER_USER_DATA_GS_0;
- } else {
- return R_00B330_SPI_SHADER_USER_DATA_ES_0;
- }
- }
-
- if (has_ngg)
- return R_00B230_SPI_SHADER_USER_DATA_GS_0;
-
- return R_00B130_SPI_SHADER_USER_DATA_VS_0;
- case MESA_SHADER_GEOMETRY:
- return gfx_level == GFX9 ? R_00B330_SPI_SHADER_USER_DATA_ES_0
- : R_00B230_SPI_SHADER_USER_DATA_GS_0;
- case MESA_SHADER_COMPUTE:
- case MESA_SHADER_TASK:
- return R_00B900_COMPUTE_USER_DATA_0;
- case MESA_SHADER_TESS_CTRL:
- return gfx_level == GFX9 ? R_00B430_SPI_SHADER_USER_DATA_LS_0
- : R_00B430_SPI_SHADER_USER_DATA_HS_0;
- case MESA_SHADER_TESS_EVAL:
- if (has_gs) {
- return gfx_level >= GFX10 ? R_00B230_SPI_SHADER_USER_DATA_GS_0
- : R_00B330_SPI_SHADER_USER_DATA_ES_0;
- } else if (has_ngg) {
- return R_00B230_SPI_SHADER_USER_DATA_GS_0;
- } else {
- return R_00B130_SPI_SHADER_USER_DATA_VS_0;
- }
- case MESA_SHADER_MESH:
- assert(has_ngg);
- return R_00B230_SPI_SHADER_USER_DATA_GS_0;
- default:
- unreachable("unknown shader");
- }
-}
-
static void
radv_pipeline_emit_depth_stencil_state(struct radeon_cmdbuf *ctx_cs,
const struct radv_depth_stencil_state *ds_state)
for (unsigned i = 0; i < MESA_VULKAN_SHADER_STAGES; i++) {
bool shader_exists = !!pipeline->base.shaders[i];
if (shader_exists || i < MESA_SHADER_COMPUTE) {
- /* We need this info for some stages even when the shader doesn't exist. */
- pipeline->base.user_data_0[i] = radv_pipeline_stage_to_user_data_0(
- pipeline, i, device->physical_device->rad_info.gfx_level);
-
if (shader_exists)
pipeline->base.need_indirect_descriptor_sets |=
radv_shader_need_indirect_descriptor_sets(pipeline->base.shaders[i]);
gl_shader_stage first_stage =
radv_pipeline_has_stage(pipeline, MESA_SHADER_MESH) ? MESA_SHADER_MESH : MESA_SHADER_VERTEX;
+ const struct radv_shader *shader = radv_get_shader(pipeline->base.shaders, first_stage);
const struct radv_userdata_info *loc =
- radv_get_user_sgpr(radv_get_shader(pipeline->base.shaders, first_stage),
- AC_UD_VS_BASE_VERTEX_START_INSTANCE);
+ radv_get_user_sgpr(shader, AC_UD_VS_BASE_VERTEX_START_INSTANCE);
+
if (loc->sgpr_idx != -1) {
- pipeline->vtx_base_sgpr = pipeline->base.user_data_0[first_stage];
+ pipeline->vtx_base_sgpr = shader->info.user_data_0;
pipeline->vtx_base_sgpr += loc->sgpr_idx * 4;
pipeline->vtx_emit_num = loc->num_sgprs;
pipeline->uses_drawid =
struct radv_compute_pipeline *pipeline,
const struct radv_pipeline_layout *layout)
{
- pipeline->base.user_data_0[MESA_SHADER_COMPUTE] = R_00B900_COMPUTE_USER_DATA_0;
pipeline->base.need_indirect_descriptor_sets |=
radv_shader_need_indirect_descriptor_sets(pipeline->base.shaders[MESA_SHADER_COMPUTE]);
radv_pipeline_init_scratch(device, &pipeline->base);