switch (id) {
case TU_DRAW_STATE_PROGRAM:
case TU_DRAW_STATE_VI:
- case TU_DRAW_STATE_FS_CONST:
/* The blob seems to not enable this (DESC_SETS_LOAD) for binning, even
* when resources would actually be used in the binning shader.
* Presumably the overhead of prefetching the resources isn't
tu_cs_emit_write_reg(cs, REG_A6XX_SP_CHICKEN_BITS, 0x00000410);
tu_cs_emit_write_reg(cs, REG_A6XX_SP_IBO_COUNT, 0);
tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B182, 0);
- tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_SHARED_CONSTS, 0);
+ tu_cs_emit_regs(cs, A6XX_HLSQ_SHARED_CONSTS(.enable = false));
tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_UNKNOWN_0E12, 0x3200000);
tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_CLIENT_PF, 4);
tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E01, 0x0);
tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A9A8, 0);
- tu_cs_emit_write_reg(cs, REG_A6XX_SP_MODE_CONTROL,
- A6XX_SP_MODE_CONTROL_CONSTANT_DEMOTION_ENABLE | 4);
+ tu_cs_emit_regs(cs, A6XX_SP_MODE_CONTROL(.constant_demotion_enable = true,
+ .isammode = ISAMMODE_GL,
+ .shared_consts_enable = false));
/* TODO: set A6XX_VFD_ADD_OFFSET_INSTANCE and fix ir3 to avoid adding base instance */
tu_cs_emit_write_reg(cs, REG_A6XX_VFD_ADD_OFFSET, A6XX_VFD_ADD_OFFSET_VERTEX);
tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
CP_SET_DRAW_STATE__0_DISABLE |
- CP_SET_DRAW_STATE__0_GROUP_ID(TU_DRAW_STATE_SHADER_GEOM_CONST));
+ CP_SET_DRAW_STATE__0_GROUP_ID(TU_DRAW_STATE_CONST));
tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));
&pipeline->program.link[type];
uint32_t dwords = 0;
- if (link->push_consts.count > 0) {
- unsigned num_units = link->push_consts.count;
- dwords += 4 + num_units * 4;
+ if (link->push_consts.dwords > 0) {
+ unsigned num_units = link->push_consts.dwords;
+ dwords += 4 + num_units;
}
return dwords;
const struct tu_program_descriptor_linkage *link =
&pipeline->program.link[type];
- if (link->push_consts.count > 0) {
- unsigned num_units = link->push_consts.count;
+ if (link->push_consts.dwords > 0) {
+ unsigned num_units = link->push_consts.dwords;
unsigned offset = link->push_consts.lo;
- tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + num_units * 4);
- tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
+
+ /* DST_OFF and NUM_UNIT requires vec4 units */
+ tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + num_units);
+ tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(offset / 4) |
CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
- CP_LOAD_STATE6_0_NUM_UNIT(num_units));
+ CP_LOAD_STATE6_0_NUM_UNIT(num_units / 4));
tu_cs_emit(cs, 0);
tu_cs_emit(cs, 0);
- for (unsigned i = 0; i < num_units * 4; i++)
- tu_cs_emit(cs, push_constants[i + offset * 4]);
+ for (unsigned i = 0; i < num_units; i++)
+ tu_cs_emit(cs, push_constants[i + offset]);
}
}
-static struct tu_draw_state
-tu6_emit_consts(struct tu_cmd_buffer *cmd,
-                const struct tu_pipeline *pipeline,
-                gl_shader_stage type)
+/* Upload the pipeline's push constants in one CP_LOAD_STATE6 packet into the
+ * "shared constants" space (SB6_IBO state block) instead of per-stage const
+ * files.  Note: unlike regular const loads, DST_OFF and NUM_UNIT here are in
+ * units of dwords rather than vec4s (see the comment in the dwords branch).
+ */
+static void
+tu6_emit_shared_consts(struct tu_cs *cs,
+                       const struct tu_pipeline *pipeline,
+                       uint32_t *push_constants,
+                       bool compute)
{
-   uint32_t dwords = tu6_user_consts_size(pipeline, type);
-   if (dwords == 0)
-      return (struct tu_draw_state) {};
+   if (pipeline->shared_consts.dwords > 0) {
+      /* Offset and num_units for shared consts are in units of dwords. */
+      unsigned num_units = pipeline->shared_consts.dwords;
+      unsigned offset = pipeline->shared_consts.lo;
-   struct tu_cs cs;
-   tu_cs_begin_sub_stream(&cmd->sub_cs, dwords, &cs);
+      /* Compute uses a different opcode/state type for the same load. */
+      enum a6xx_state_type st = compute ? ST6_UBO : ST6_CONSTANTS;
+      uint32_t cp_load_state = compute ? CP_LOAD_STATE6_FRAG : CP_LOAD_STATE6;
-   tu6_emit_user_consts(&cs, pipeline, type, cmd->push_constants);
+      tu_cs_emit_pkt7(cs, cp_load_state, 3 + num_units);
+      tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
+                 CP_LOAD_STATE6_0_STATE_TYPE(st) |
+                 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
+                 CP_LOAD_STATE6_0_STATE_BLOCK(SB6_IBO) |
+                 CP_LOAD_STATE6_0_NUM_UNIT(num_units));
+      tu_cs_emit(cs, 0);
+      tu_cs_emit(cs, 0);
-   return tu_cs_end_draw_state(&cmd->sub_cs, &cs);
+      /* Inline (SS6_DIRECT) payload: the raw push-constant dwords. */
+      for (unsigned i = 0; i < num_units; i++)
+         tu_cs_emit(cs, push_constants[i + offset]);
+   }
+}
+
+/* Return the size, in dwords, of the command-stream space that
+ * tu6_emit_consts() will emit: either a single shared-consts load
+ * (payload plus 4 header dwords) or the per-stage user-consts packets.
+ * Returns 0 when there is nothing to emit.
+ */
+static uint32_t
+tu6_const_size(struct tu_cmd_buffer *cmd,
+               const struct tu_pipeline *pipeline,
+               bool compute)
+{
+   uint32_t dwords = 0;
+
+   if (pipeline->shared_consts.dwords > 0) {
+      /* +4 = pkt7 header + the 3 CP_LOAD_STATE6 header dwords
+       * (matches tu6_emit_shared_consts). */
+      dwords = pipeline->shared_consts.dwords + 4;
+   } else {
+      if (compute) {
+         dwords = tu6_user_consts_size(pipeline, MESA_SHADER_COMPUTE);
+      } else {
+         /* Graphics: sum across all stages, VS through FS inclusive. */
+         for (uint32_t type = MESA_SHADER_VERTEX; type <= MESA_SHADER_FRAGMENT; type++)
+            dwords += tu6_user_consts_size(pipeline, type);
+      }
+   }
+
+   return dwords;
}
+/* Build one draw state holding all push-constant uploads for the pipeline —
+ * every graphics stage (VS..FS) or just compute — replacing the old split
+ * between tu6_emit_consts_geom() and a separate FS-only emit.  Returns an
+ * empty draw state when there are no constants to upload.
+ */
static struct tu_draw_state
-tu6_emit_consts_geom(struct tu_cmd_buffer *cmd,
-                const struct tu_pipeline *pipeline)
+tu6_emit_consts(struct tu_cmd_buffer *cmd,
+                const struct tu_pipeline *pipeline,
+                bool compute)
{
uint32_t dwords = 0;
-   for (uint32_t type = MESA_SHADER_VERTEX; type < MESA_SHADER_FRAGMENT; type++)
-      dwords += tu6_user_consts_size(pipeline, type);
+   dwords = tu6_const_size(cmd, pipeline, compute);
if (dwords == 0)
return (struct tu_draw_state) {};
struct tu_cs cs;
tu_cs_begin_sub_stream(&cmd->sub_cs, dwords, &cs);
-   for (uint32_t type = MESA_SHADER_VERTEX; type < MESA_SHADER_FRAGMENT; type++)
-      tu6_emit_user_consts(&cs, pipeline, type, cmd->push_constants);
+   /* When shared consts are in use they fully replace per-stage user
+    * consts, so the two paths are mutually exclusive. */
+   if (pipeline->shared_consts.dwords > 0) {
+      tu6_emit_shared_consts(&cs, pipeline, cmd->push_constants, compute);
+   } else {
+      if (compute) {
+         tu6_emit_user_consts(&cs, pipeline, MESA_SHADER_COMPUTE, cmd->push_constants);
+      } else {
+         for (uint32_t type = MESA_SHADER_VERTEX; type <= MESA_SHADER_FRAGMENT; type++)
+            tu6_emit_user_consts(&cs, pipeline, type, cmd->push_constants);
+      }
+   }
return tu_cs_end_draw_state(&cmd->sub_cs, &cs);
}
tu_cs_emit_regs(&cs, A6XX_RB_STENCIL_CONTROL(.dword = cmd->state.rb_stencil_cntl));
}
- if (cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) {
- cmd->state.shader_const[0] =
- tu6_emit_consts_geom(cmd, pipeline);
- cmd->state.shader_const[1] =
- tu6_emit_consts(cmd, pipeline, MESA_SHADER_FRAGMENT);
- }
+ if (cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS)
+ cmd->state.shader_const = tu6_emit_consts(cmd, pipeline, false);
if (cmd->state.dirty & TU_CMD_DIRTY_VIEWPORTS) {
struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_VIEWPORT, 8 + 10 * cmd->state.max_viewport);
tu_cs_emit_draw_state(cs, TU_DRAW_STATE_RAST, pipeline->rast_state);
tu_cs_emit_draw_state(cs, TU_DRAW_STATE_PRIM_MODE_SYSMEM, pipeline->prim_order_state_sysmem);
tu_cs_emit_draw_state(cs, TU_DRAW_STATE_PRIM_MODE_GMEM, pipeline->prim_order_state_gmem);
- tu_cs_emit_draw_state(cs, TU_DRAW_STATE_SHADER_GEOM_CONST, cmd->state.shader_const[0]);
- tu_cs_emit_draw_state(cs, TU_DRAW_STATE_FS_CONST, cmd->state.shader_const[1]);
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_CONST, cmd->state.shader_const);
tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DESC_SETS, cmd->state.desc_sets);
tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DESC_SETS_LOAD, pipeline->load_state);
tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VB, cmd->state.vertex_buffers);
*/
bool emit_binding_stride = false, emit_blend = false;
uint32_t draw_state_count =
- ((cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) ? 2 : 0) +
+ ((cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) ? 1 : 0) +
((cmd->state.dirty & TU_CMD_DIRTY_DESC_SETS_LOAD) ? 1 : 0) +
((cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS) ? 1 : 0) +
((cmd->state.dirty & TU_CMD_DIRTY_VS_PARAMS) ? 1 : 0) +
if (draw_state_count > 0)
tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * draw_state_count);
- if (cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) {
- tu_cs_emit_draw_state(cs, TU_DRAW_STATE_SHADER_GEOM_CONST, cmd->state.shader_const[0]);
- tu_cs_emit_draw_state(cs, TU_DRAW_STATE_FS_CONST, cmd->state.shader_const[1]);
- }
+ if (cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS)
+ tu_cs_emit_draw_state(cs, TU_DRAW_STATE_CONST, cmd->state.shader_const);
if (cmd->state.dirty & TU_CMD_DIRTY_DESC_SETS_LOAD)
tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DESC_SETS_LOAD, pipeline->load_state);
if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS)
tu_emit_cache_flush(cmd, cs);
/* note: no reason to have this in a separate IB */
- tu_cs_emit_state_ib(cs,
- tu6_emit_consts(cmd, pipeline, MESA_SHADER_COMPUTE));
+ tu_cs_emit_state_ib(cs, tu6_emit_consts(cmd, pipeline, true));
tu_emit_compute_driver_params(cmd, cs, pipeline, info);
}
+/* Enable/disable the shared-constants mechanism.  Both
+ * HLSQ_SHARED_CONSTS.ENABLE and SP_MODE_CONTROL.SHARED_CONSTS_ENABLE are
+ * driven from the same flag so they always agree; the other
+ * SP_MODE_CONTROL fields (constant demotion, ISAMMODE_GL) are written to
+ * their usual values at the same time.
+ */
static void
+tu6_emit_shared_consts_enable(struct tu_cs *cs, bool enable)
+{
+   /* Enable/disable shared constants */
+   tu_cs_emit_regs(cs, A6XX_HLSQ_SHARED_CONSTS(.enable = enable));
+   tu_cs_emit_regs(cs, A6XX_SP_MODE_CONTROL(.constant_demotion_enable = true,
+                                            .isammode = ISAMMODE_GL,
+                                            .shared_consts_enable = enable));
+}
+
+static void
tu6_emit_cs_config(struct tu_cs *cs,
const struct ir3_shader_variant *v,
const struct tu_pvtmem_config *pvtmem,
uint64_t binary_iova)
{
+ bool shared_consts_enable = ir3_const_state(v)->shared_consts_enable;
+ tu6_emit_shared_consts_enable(cs, shared_consts_enable);
+
tu_cs_emit_regs(cs, A6XX_HLSQ_INVALIDATE_CMD(
.cs_state = true,
- .cs_ibo = true));
+ .cs_ibo = true,
+ .cs_shared_const = shared_consts_enable));
tu6_emit_xs_config(cs, MESA_SHADER_COMPUTE, v);
tu6_emit_xs(cs, MESA_SHADER_COMPUTE, v, pvtmem, binary_iova);
STATIC_ASSERT(MESA_SHADER_VERTEX == 0);
+ bool shared_consts_enable = builder->layout->push_constant_size > 0;
+ tu6_emit_shared_consts_enable(cs, shared_consts_enable);
+
tu_cs_emit_regs(cs, A6XX_HLSQ_INVALIDATE_CMD(
.vs_state = true,
.hs_state = true,
.ds_state = true,
.gs_state = true,
.fs_state = true,
- .gfx_ibo = true));
+ .gfx_ibo = true,
+ .gfx_shared_const = shared_consts_enable));
for (; stage < ARRAY_SIZE(builder->shader_iova); stage++) {
tu6_emit_xs_config(cs, stage, builder->shaders->variants[stage]);
}
stage_infos[stage] = &builder->create_info->pStages[i];
}
+ if (builder->layout->push_constant_size > 0) {
+ pipeline->shared_consts = (struct tu_push_constant_range) {
+ .lo = 0,
+ .dwords = builder->layout->push_constant_size / 4,
+ };
+ }
+
struct tu_shader_key keys[ARRAY_SIZE(stage_infos)] = { };
for (gl_shader_stage stage = MESA_SHADER_VERTEX;
stage < ARRAY_SIZE(keys); stage++) {
stage < ARRAY_SIZE(shaders); stage++) {
if (!shaders[stage])
continue;
-
+
int64_t stage_start = os_time_get_nano();
compiled_shaders->variants[stage] =
stage_feedbacks[stage].duration += os_time_get_nano() - stage_start;
}
+ compiled_shaders->shared_consts = pipeline->shared_consts;
uint32_t safe_constlens = ir3_trim_constlen(compiled_shaders->variants, compiler);
VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT;
}
+ if (layout->push_constant_size > 0) {
+ pipeline->shared_consts = (struct tu_push_constant_range) {
+ .lo = 0,
+ .dwords = layout->push_constant_size / 4,
+ };
+ }
+
char *nir_initial_disasm = NULL;
if (!compiled) {
compiled->active_desc_sets = shader->active_desc_sets;
compiled->push_consts[MESA_SHADER_COMPUTE] = shader->push_consts;
+ compiled->shared_consts = pipeline->shared_consts;
struct ir3_shader_variant *v =
ir3_shader_create_variant(shader->ir3_shader, &ir3_key, executable_info);
}
static void
-lower_load_push_constant(nir_builder *b, nir_intrinsic_instr *instr,
+lower_load_push_constant(struct tu_device *dev,
+ nir_builder *b,
+ nir_intrinsic_instr *instr,
struct tu_shader *shader)
{
uint32_t base = nir_intrinsic_base(instr);
assert(base % 4 == 0);
- assert(base >= shader->push_consts.lo * 16);
- base -= shader->push_consts.lo * 16;
+ assert(base >= shader->push_consts.lo * 4);
+ base -= shader->push_consts.lo * 4;
nir_ssa_def *load =
- nir_load_uniform(b, instr->num_components, instr->dest.ssa.bit_size,
- nir_ushr(b, instr->src[0].ssa, nir_imm_int(b, 2)),
- .base = base / 4);
+ nir_load_uniform(b, instr->num_components,
+ instr->dest.ssa.bit_size,
+ nir_ushr(b, instr->src[0].ssa, nir_imm_int(b, 2)),
+ .base = base + dev->compiler->shared_consts_base_offset * 4);
nir_ssa_def_rewrite_uses(&instr->dest.ssa, load);
{
switch (instr->intrinsic) {
case nir_intrinsic_load_push_constant:
- lower_load_push_constant(b, instr, shader);
+ lower_load_push_constant(dev, b, instr, shader);
return true;
case nir_intrinsic_load_vulkan_descriptor:
if (min >= max) {
tu_shader->push_consts.lo = 0;
- tu_shader->push_consts.count = 0;
+ tu_shader->push_consts.dwords = 0;
return;
}
- /* CP_LOAD_STATE OFFSET and NUM_UNIT are in units of vec4 (4 dwords),
- * however there's an alignment requirement of 4 on OFFSET. Expand the
- * range and change units accordingly.
+ /* CP_LOAD_STATE OFFSET and NUM_UNIT for SHARED_CONSTS are in units of
+ * dwords while loading regular consts is in units of vec4's.
+ * So we unify the unit here as dwords for tu_push_constant_range, then
+ * we should consider correct unit when emitting.
+ *
+ * Note there's an alignment requirement of 16 dwords on OFFSET. Expand
+ * the range and change units accordingly.
*/
- tu_shader->push_consts.lo = (min / 16) / 4 * 4;
- tu_shader->push_consts.count =
- align(max, 16) / 16 - tu_shader->push_consts.lo;
+ tu_shader->push_consts.lo = (min / 4) / 4 * 4;
+ tu_shader->push_consts.dwords =
+ align(max, 16) / 4 - tu_shader->push_consts.lo;
}
static bool
shader->ir3_shader =
ir3_shader_from_nir(dev->compiler, nir, &(struct ir3_shader_options) {
- .reserved_user_consts = align(shader->push_consts.count, 4),
+ .reserved_user_consts = 0,
+ .shared_consts_enable = layout->push_constant_size > 0,
.api_wavesize = key->api_wavesize,
.real_wavesize = key->real_wavesize,
}, &so_info);