From 419c4b8ecec05288401fe9d167c46aad72801aa0 Mon Sep 17 00:00:00 2001
From: Connor Abbott
Date: Fri, 8 Sep 2023 13:41:18 +0200
Subject: [PATCH] tu: Decouple program state from the pipeline

There are a couple of pieces of state that we can't calculate until we
know all of the shaders:

- The actual variants to use (i.e. whether to use safe-const variants)
- Program config and VPC draw states
- Const layout, which depends on the variants
- Whether per-view viewports should be enabled

Now that these are all combined in tu_pipeline::program, move them into
a separate struct that can be referenced directly without a pipeline.
The next step is to refactor the code that fills it out so that it can
be called at draw time when given just the shaders.

Part-of:
---
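Reviewer note (not part of the commit message): the practical effect on
draw-time code is that program-level state is now read through the command
buffer instead of through the bound pipeline. A minimal sketch of the access
pattern, using only names that appear in this patch:

   /* before: only reachable through the bound graphics pipeline */
   const struct tu_program_descriptor_linkage *link =
      &cmd->state.pipeline->base.program.link[MESA_SHADER_VERTEX];

   /* after: read from tu_cmd_state, so the same path can eventually be
    * driven by cmd->state.shaders[] alone, without any pipeline bound
    */
   const struct tu_program_descriptor_linkage *link =
      &cmd->state.program.link[MESA_SHADER_VERTEX];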
 src/freedreno/vulkan/tu_cmd_buffer.cc | 131 ++++++++++++++++++----------------
 src/freedreno/vulkan/tu_cmd_buffer.h  |   4 +-
 src/freedreno/vulkan/tu_pipeline.cc   |  24 +++----
 src/freedreno/vulkan/tu_pipeline.h    |  39 +++++-----
 4 files changed, 104 insertions(+), 94 deletions(-)

diff --git a/src/freedreno/vulkan/tu_cmd_buffer.cc b/src/freedreno/vulkan/tu_cmd_buffer.cc
index 7370abc..cbb9183 100644
--- a/src/freedreno/vulkan/tu_cmd_buffer.cc
+++ b/src/freedreno/vulkan/tu_cmd_buffer.cc
@@ -3008,6 +3008,8 @@ tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
    TU_FROM_HANDLE(tu_pipeline, pipeline, _pipeline);
 
    if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE) {
+      cmd->state.shaders[MESA_SHADER_COMPUTE] =
+         pipeline->shaders[MESA_SHADER_COMPUTE];
       cmd->state.compute_pipeline = tu_pipeline_to_compute(pipeline);
       tu_cs_emit_state_ib(&cmd->cs,
                           pipeline->shaders[MESA_SHADER_COMPUTE]->state);
@@ -3019,7 +3021,7 @@ tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
    cmd->state.pipeline = tu_pipeline_to_graphics(pipeline);
    cmd->state.dirty |= TU_CMD_DIRTY_DESC_SETS | TU_CMD_DIRTY_SHADER_CONSTS |
                        TU_CMD_DIRTY_VS_PARAMS | TU_CMD_DIRTY_LRZ |
-                       TU_CMD_DIRTY_PIPELINE;
+                       TU_CMD_DIRTY_PROGRAM;
 
    tu_bind_vs(cmd, pipeline->shaders[MESA_SHADER_VERTEX]);
    tu_bind_tcs(cmd, pipeline->shaders[MESA_SHADER_TESS_CTRL]);
@@ -3029,6 +3031,7 @@
 
    vk_cmd_set_dynamic_graphics_state(&cmd->vk,
                                      &cmd->state.pipeline->dynamic_state);
+   cmd->state.program = pipeline->program;
 
    if (cmd->state.pipeline->feedback_loop_may_involve_textures &&
        !cmd->state.rp.disable_gmem) {
@@ -4230,36 +4233,32 @@ tu_CmdNextSubpass2(VkCommandBuffer commandBuffer,
 TU_GENX(tu_CmdNextSubpass2);
 
 static uint32_t
-tu6_user_consts_size(const struct tu_pipeline *pipeline,
+tu6_user_consts_size(const struct tu_const_state *const_state,
                      gl_shader_stage type)
 {
-   const struct tu_program_descriptor_linkage *link =
-      &pipeline->program.link[type];
    uint32_t dwords = 0;
 
-   if (link->tu_const_state.push_consts.dwords > 0) {
-      unsigned num_units = link->tu_const_state.push_consts.dwords;
+   if (const_state->push_consts.dwords > 0) {
+      unsigned num_units = const_state->push_consts.dwords;
       dwords += 4 + num_units;
    }
 
-   dwords += 8 * link->tu_const_state.num_inline_ubos;
+   dwords += 8 * const_state->num_inline_ubos;
 
    return dwords;
 }
 
 static void
 tu6_emit_user_consts(struct tu_cs *cs,
-                     const struct tu_pipeline *pipeline,
+                     const struct tu_const_state *const_state,
+                     unsigned constlen,
                      gl_shader_stage type,
                      struct tu_descriptor_state *descriptors,
                      uint32_t *push_constants)
 {
-   const struct tu_program_descriptor_linkage *link =
-      &pipeline->program.link[type];
-
-   if (link->tu_const_state.push_consts.dwords > 0) {
-      unsigned num_units = link->tu_const_state.push_consts.dwords;
-      unsigned offset = link->tu_const_state.push_consts.lo;
+   if (const_state->push_consts.dwords > 0) {
+      unsigned num_units = const_state->push_consts.dwords;
+      unsigned offset = const_state->push_consts.lo;
 
       /* DST_OFF and NUM_UNIT requires vec4 units */
       tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + num_units);
@@ -4277,10 +4276,10 @@ tu6_emit_user_consts(struct tu_cs *cs,
    /* Emit loads of inline uniforms. These load directly from the uniform's
     * storage space inside the descriptor set.
     */
-   for (unsigned i = 0; i < link->tu_const_state.num_inline_ubos; i++) {
-      const struct tu_inline_ubo *ubo = &link->tu_const_state.ubos[i];
+   for (unsigned i = 0; i < const_state->num_inline_ubos; i++) {
+      const struct tu_inline_ubo *ubo = &const_state->ubos[i];
 
-      if (link->constlen <= ubo->const_offset_vec4)
+      if (constlen <= ubo->const_offset_vec4)
         continue;
 
      uint64_t va = descriptors->set_iova[ubo->base] & ~0x3f;
@@ -4290,7 +4289,7 @@ tu6_emit_user_consts(struct tu_cs *cs,
                  CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                  CP_LOAD_STATE6_0_STATE_SRC(ubo->push_address ? SS6_DIRECT : SS6_INDIRECT) |
                  CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
-                 CP_LOAD_STATE6_0_NUM_UNIT(MIN2(ubo->size_vec4, link->constlen - ubo->const_offset_vec4)));
+                 CP_LOAD_STATE6_0_NUM_UNIT(MIN2(ubo->size_vec4, constlen - ubo->const_offset_vec4)));
      if (ubo->push_address) {
         tu_cs_emit(cs, 0);
         tu_cs_emit(cs, 0);
@@ -4305,14 +4304,14 @@
 
 static void
 tu6_emit_shared_consts(struct tu_cs *cs,
-                       const struct tu_pipeline *pipeline,
+                       const struct tu_push_constant_range *shared_consts,
                        uint32_t *push_constants,
                        bool compute)
 {
-   if (pipeline->program.shared_consts.dwords > 0) {
+   if (shared_consts->dwords > 0) {
      /* Offset and num_units for shared consts are in units of dwords. */
-      unsigned num_units = pipeline->program.shared_consts.dwords;
-      unsigned offset = pipeline->program.shared_consts.lo;
+      unsigned num_units = shared_consts->dwords;
+      unsigned offset = shared_consts->lo;
 
      enum a6xx_state_type st = compute ? ST6_UBO : ST6_CONSTANTS;
      uint32_t cp_load_state = compute ? CP_LOAD_STATE6_FRAG : CP_LOAD_STATE6;
@@ -4333,20 +4332,21 @@ tu6_emit_shared_consts(struct tu_cs *cs,
 
 static uint32_t
 tu6_const_size(struct tu_cmd_buffer *cmd,
-               const struct tu_pipeline *pipeline,
+               const struct tu_push_constant_range *shared_consts,
                bool compute)
 {
    uint32_t dwords = 0;
 
-   if (pipeline->program.shared_consts.dwords > 0) {
-      dwords += pipeline->program.shared_consts.dwords + 4;
+   if (shared_consts->dwords > 0) {
+      dwords += shared_consts->dwords + 4;
    }
 
    if (compute) {
-      dwords += tu6_user_consts_size(pipeline, MESA_SHADER_COMPUTE);
+      dwords +=
+         tu6_user_consts_size(&cmd->state.shaders[MESA_SHADER_COMPUTE]->const_state, MESA_SHADER_COMPUTE);
    } else {
      for (uint32_t type = MESA_SHADER_VERTEX; type <= MESA_SHADER_FRAGMENT; type++)
-         dwords += tu6_user_consts_size(pipeline, (gl_shader_stage) type);
+         dwords += tu6_user_consts_size(&cmd->state.shaders[type]->const_state, (gl_shader_stage) type);
    }
 
    return dwords;
@@ -4354,12 +4354,14 @@ tu6_const_size(struct tu_cmd_buffer *cmd,
 
 static struct tu_draw_state
 tu6_emit_consts(struct tu_cmd_buffer *cmd,
-                const struct tu_pipeline *pipeline,
                 bool compute)
 {
    uint32_t dwords = 0;
+   const struct tu_push_constant_range *shared_consts =
+      compute ? &cmd->state.shaders[MESA_SHADER_COMPUTE]->shared_consts :
+                &cmd->state.program.shared_consts;
 
-   dwords = tu6_const_size(cmd, pipeline, compute);
+   dwords = tu6_const_size(cmd, shared_consts, compute);
 
    if (dwords == 0)
       return (struct tu_draw_state) {};
@@ -4367,26 +4369,33 @@ tu6_emit_consts(struct tu_cmd_buffer *cmd,
    struct tu_cs cs;
    tu_cs_begin_sub_stream(&cmd->sub_cs, dwords, &cs);
 
-   if (pipeline->program.shared_consts.dwords > 0) {
-      tu6_emit_shared_consts(&cs, pipeline, cmd->push_constants, compute);
+   if (shared_consts->dwords > 0) {
+      tu6_emit_shared_consts(&cs, shared_consts, cmd->push_constants, compute);
 
-      for (uint32_t i = 0; i < ARRAY_SIZE(pipeline->program.link); i++) {
+      for (uint32_t i = 0; i < ARRAY_SIZE(cmd->state.program.link); i++) {
         const struct tu_program_descriptor_linkage *link =
-            &pipeline->program.link[i];
+            &cmd->state.program.link[i];
         assert(!link->tu_const_state.push_consts.dwords);
      }
   }
 
   if (compute) {
-      tu6_emit_user_consts(&cs, pipeline, MESA_SHADER_COMPUTE,
+      tu6_emit_user_consts(&cs,
+                           &cmd->state.shaders[MESA_SHADER_COMPUTE]->const_state,
+                           cmd->state.shaders[MESA_SHADER_COMPUTE]->variant->constlen,
+                           MESA_SHADER_COMPUTE,
                            tu_get_descriptors_state(cmd, VK_PIPELINE_BIND_POINT_COMPUTE),
                            cmd->push_constants);
   } else {
      struct tu_descriptor_state *descriptors =
         tu_get_descriptors_state(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS);
-      for (uint32_t type = MESA_SHADER_VERTEX; type <= MESA_SHADER_FRAGMENT; type++)
-         tu6_emit_user_consts(&cs, pipeline, (gl_shader_stage) type,
+      for (uint32_t type = MESA_SHADER_VERTEX; type <= MESA_SHADER_FRAGMENT; type++) {
+         const struct tu_program_descriptor_linkage *link =
+            &cmd->state.program.link[type];
+         tu6_emit_user_consts(&cs, &link->tu_const_state, link->constlen,
+                              (gl_shader_stage) type,
                               descriptors, cmd->push_constants);
+      }
   }
 
   return tu_cs_end_draw_state(&cmd->sub_cs, &cs);
@@ -4500,7 +4509,7 @@ static uint32_t
 fs_params_offset(struct tu_cmd_buffer *cmd)
 {
    const struct tu_program_descriptor_linkage *link =
-      &cmd->state.pipeline->base.program.link[MESA_SHADER_FRAGMENT];
+      &cmd->state.program.link[MESA_SHADER_FRAGMENT];
    const struct ir3_const_state *const_state = &link->const_state;
 
    if (const_state->num_driver_params <= IR3_DP_FS_DYNAMIC)
@@ -4516,7 +4525,7 @@ static uint32_t
 fs_params_size(struct tu_cmd_buffer *cmd)
 {
    const struct tu_program_descriptor_linkage *link =
-      &cmd->state.pipeline->base.program.link[MESA_SHADER_FRAGMENT];
+      &cmd->state.program.link[MESA_SHADER_FRAGMENT];
    const struct ir3_const_state *const_state = &link->const_state;
 
    return DIV_ROUND_UP(const_state->num_driver_params - IR3_DP_FS_DYNAMIC, 4);
@@ -4621,6 +4630,7 @@ tu6_draw_common(struct tu_cmd_buffer *cmd,
                 uint32_t draw_count)
 {
    const struct tu_pipeline *pipeline = &cmd->state.pipeline->base;
+   const struct tu_program_state *program = &cmd->state.program;
    struct tu_render_pass_state *rp = &cmd->state.rp;
 
    /* Emit state first, because it's needed for bandwidth calculations */
@@ -4729,7 +4739,7 @@ tu6_draw_common(struct tu_cmd_buffer *cmd,
    }
 
    if (dirty & TU_CMD_DIRTY_SHADER_CONSTS)
-      cmd->state.shader_const = tu6_emit_consts(cmd, pipeline, false);
+      cmd->state.shader_const = tu6_emit_consts(cmd, false);
 
    if (dirty & TU_CMD_DIRTY_DESC_SETS)
       tu6_emit_descriptor_sets(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS);
@@ -4752,7 +4762,7 @@ tu6_draw_common(struct tu_cmd_buffer *cmd,
    bool dirty_fs_params = false;
    if (BITSET_TEST(cmd->vk.dynamic_graphics_state.dirty,
                    MESA_VK_DYNAMIC_MS_RASTERIZATION_SAMPLES) ||
-       (cmd->state.dirty & (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_FDM))) {
+       (cmd->state.dirty & (TU_CMD_DIRTY_PROGRAM | TU_CMD_DIRTY_FDM))) {
      tu6_emit_fs_params(cmd);
      dirty_fs_params = true;
   }
@@ -4770,15 +4780,15 @@ tu6_draw_common(struct tu_cmd_buffer *cmd,
    if (dirty & TU_CMD_DIRTY_DRAW_STATE) {
       tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * (TU_DRAW_STATE_COUNT - 2));
 
-      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_PROGRAM_CONFIG, pipeline->program.config_state);
-      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS, pipeline->program.vs_state);
-      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS_BINNING, pipeline->program.vs_binning_state);
-      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_HS, pipeline->program.hs_state);
-      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DS, pipeline->program.ds_state);
-      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_GS, pipeline->program.gs_state);
-      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_GS_BINNING, pipeline->program.gs_binning_state);
-      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_FS, pipeline->program.fs_state);
-      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VPC, pipeline->program.vpc_state);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_PROGRAM_CONFIG, program->config_state);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS, program->vs_state);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS_BINNING, program->vs_binning_state);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_HS, program->hs_state);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DS, program->ds_state);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_GS, program->gs_state);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_GS_BINNING, program->gs_binning_state);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_FS, program->fs_state);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VPC, program->vpc_state);
       tu_cs_emit_draw_state(cs, TU_DRAW_STATE_PRIM_MODE_SYSMEM, pipeline->prim_order.state_sysmem);
       tu_cs_emit_draw_state(cs, TU_DRAW_STATE_PRIM_MODE_GMEM, pipeline->prim_order.state_gmem);
       tu_cs_emit_draw_state(cs, TU_DRAW_STATE_CONST, cmd->state.shader_const);
@@ -4886,7 +4896,7 @@ static uint32_t
 vs_params_offset(struct tu_cmd_buffer *cmd)
 {
    const struct tu_program_descriptor_linkage *link =
-      &cmd->state.pipeline->base.program.link[MESA_SHADER_VERTEX];
+      &cmd->state.program.link[MESA_SHADER_VERTEX];
    const struct ir3_const_state *const_state = &link->const_state;
 
    if (const_state->offsets.driver_param >= link->constlen)
@@ -4923,7 +4933,8 @@ tu6_emit_vs_params(struct tu_cmd_buffer *cmd,
    /* Beside re-emitting params when they are changed, we should re-emit
    * them after constants are invalidated via HLSQ_INVALIDATE_CMD.
    */
-   if (!(cmd->state.dirty & (TU_CMD_DIRTY_DRAW_STATE | TU_CMD_DIRTY_VS_PARAMS)) &&
+   if (!(cmd->state.dirty & (TU_CMD_DIRTY_DRAW_STATE | TU_CMD_DIRTY_VS_PARAMS |
+                             TU_CMD_DIRTY_PROGRAM)) &&
       (offset == 0 || draw_id == cmd->state.last_vs_params.draw_id) &&
       vertex_offset == cmd->state.last_vs_params.vertex_offset &&
       first_instance == cmd->state.last_vs_params.first_instance) {
@@ -5328,22 +5339,22 @@ struct tu_dispatch_info
 template <chip CHIP>
 static void
 tu_emit_compute_driver_params(struct tu_cmd_buffer *cmd,
-                              struct tu_cs *cs, struct tu_compute_pipeline *pipeline,
+                              struct tu_cs *cs,
                               const struct tu_dispatch_info *info)
 {
    gl_shader_stage type = MESA_SHADER_COMPUTE;
-   const struct tu_program_descriptor_linkage *link =
-      &pipeline->base.program.link[type];
-   const struct ir3_const_state *const_state = &link->const_state;
+   const struct tu_shader *shader = cmd->state.shaders[MESA_SHADER_COMPUTE];
+   const struct ir3_shader_variant *variant = shader->variant;
+   const struct ir3_const_state *const_state = variant->const_state;
    uint32_t offset = const_state->offsets.driver_param;
-   unsigned subgroup_size = pipeline->subgroup_size;
+   unsigned subgroup_size = variant->info.subgroup_size;
    unsigned subgroup_shift = util_logbase2(subgroup_size);
 
-   if (link->constlen <= offset)
+   if (variant->constlen <= offset)
      return;
 
    uint32_t num_consts = MIN2(const_state->num_driver_params,
-                              (link->constlen - offset) * 4);
+                              (variant->constlen - offset) * 4);
 
    if (!info->indirect) {
      uint32_t driver_params[12] = {
@@ -5485,9 +5496,9 @@ tu_dispatch(struct tu_cmd_buffer *cmd,
    tu_emit_cache_flush(cmd);
 
    /* note: no reason to have this in a separate IB */
-   tu_cs_emit_state_ib(cs, tu6_emit_consts(cmd, &pipeline->base, true));
+   tu_cs_emit_state_ib(cs, tu6_emit_consts(cmd, true));
 
-   tu_emit_compute_driver_params(cmd, cs, pipeline, info);
+   tu_emit_compute_driver_params(cmd, cs, info);
 
    if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_DESC_SETS) {
       tu6_emit_descriptor_sets(cmd, VK_PIPELINE_BIND_POINT_COMPUTE);
diff --git a/src/freedreno/vulkan/tu_cmd_buffer.h b/src/freedreno/vulkan/tu_cmd_buffer.h
index 19090c4..12c0bc1 100644
--- a/src/freedreno/vulkan/tu_cmd_buffer.h
+++ b/src/freedreno/vulkan/tu_cmd_buffer.h
@@ -70,7 +70,7 @@ enum tu_cmd_dirty_bits
    TU_CMD_DIRTY_FDM = BIT(8),
    TU_CMD_DIRTY_PER_VIEW_VIEWPORT = BIT(9),
    TU_CMD_DIRTY_TES = BIT(10),
-   TU_CMD_DIRTY_PIPELINE = BIT(11),
+   TU_CMD_DIRTY_PROGRAM = BIT(11),
    /* all draw states were disabled and need to be re-enabled: */
    TU_CMD_DIRTY_DRAW_STATE = BIT(12)
 };
@@ -390,6 +390,8 @@ struct tu_cmd_state
 
    struct tu_shader *shaders[MESA_SHADER_STAGES];
 
+   struct tu_program_state program;
+
    struct tu_render_pass_state rp;
 
    struct vk_render_pass_state vk_rp;
diff --git a/src/freedreno/vulkan/tu_pipeline.cc b/src/freedreno/vulkan/tu_pipeline.cc
index 20a1ce9..b933273 100644
--- a/src/freedreno/vulkan/tu_pipeline.cc
+++ b/src/freedreno/vulkan/tu_pipeline.cc
@@ -978,12 +978,12 @@ tu6_patch_control_points_size(struct tu_device *dev,
                               const struct tu_shader *vs,
                               const struct tu_shader *tcs,
                               const struct tu_shader *tes,
-                              const struct tu_pipeline *pipeline,
+                              const struct tu_program_state *program,
                               uint32_t patch_control_points)
 {
 #define EMIT_CONST_DWORDS(const_dwords) (4 + const_dwords)
    return EMIT_CONST_DWORDS(4) +
-      EMIT_CONST_DWORDS(pipeline->program.hs_param_dwords) + 2 + 2 + 2;
+      EMIT_CONST_DWORDS(program->hs_param_dwords) + 2 + 2 + 2;
 #undef EMIT_CONST_DWORDS
 }
 
@@ -993,7 +993,7 @@ tu6_emit_patch_control_points(struct tu_cs *cs,
                               const struct tu_shader *vs,
                               const struct tu_shader *tcs,
                               const struct tu_shader *tes,
-                              const struct tu_pipeline *pipeline,
+                              const struct tu_program_state *program,
                               uint32_t patch_control_points)
 {
    if (!tcs->variant)
@@ -1002,8 +1002,8 @@ tu6_emit_patch_control_points(struct tu_cs *cs,
    struct tu_device *dev = cs->device;
 
    tu6_emit_vs_params(cs,
-                      &pipeline->program.link[MESA_SHADER_VERTEX].const_state,
-                      pipeline->program.link[MESA_SHADER_VERTEX].constlen,
+                      &program->link[MESA_SHADER_VERTEX].const_state,
+                      program->link[MESA_SHADER_VERTEX].constlen,
                       vs->variant->output_size,
                       patch_control_points);
@@ -1022,10 +1022,10 @@ tu6_emit_patch_control_points(struct tu_cs *cs,
    };
 
    const struct ir3_const_state *hs_const =
-      &pipeline->program.link[MESA_SHADER_TESS_CTRL].const_state;
+      &program->link[MESA_SHADER_TESS_CTRL].const_state;
    uint32_t hs_base = hs_const->offsets.primitive_param;
    tu6_emit_const(cs, CP_LOAD_STATE6_GEOM, hs_base, SB6_HS_SHADER, 0,
-                  pipeline->program.hs_param_dwords, hs_params);
+                  program->hs_param_dwords, hs_params);
 
    uint32_t patch_local_mem_size_16b =
       patch_control_points * vs->variant->output_size / 4;
@@ -3457,7 +3457,7 @@ tu_pipeline_builder_emit_state(struct tu_pipeline_builder *builder,
                   pipeline->shaders[MESA_SHADER_VERTEX],
                   pipeline->shaders[MESA_SHADER_TESS_CTRL],
                   pipeline->shaders[MESA_SHADER_TESS_EVAL],
-                  pipeline,
+                  &pipeline->program,
                   builder->graphics_state.ts->patch_control_points);
 #undef DRAW_STATE
 #undef DRAW_STATE_COND
@@ -3636,11 +3636,11 @@ tu_emit_draw_state(struct tu_cmd_buffer *cmd)
                    &cmd->vk.dynamic_graphics_state.rs);
    DRAW_STATE_COND(patch_control_points,
                    TU_DYNAMIC_STATE_PATCH_CONTROL_POINTS,
-                   cmd->state.dirty & TU_CMD_DIRTY_PIPELINE,
+                   cmd->state.dirty & TU_CMD_DIRTY_PROGRAM,
                    cmd->state.shaders[MESA_SHADER_VERTEX],
                    cmd->state.shaders[MESA_SHADER_TESS_CTRL],
                    cmd->state.shaders[MESA_SHADER_TESS_EVAL],
-                   &cmd->state.pipeline->base,
+                   &cmd->state.program,
                    cmd->vk.dynamic_graphics_state.ts.patch_control_points);
 #undef DRAW_STATE
 #undef DRAW_STATE_COND
@@ -4318,8 +4318,6 @@ tu_compute_pipeline_create(VkDevice device,
      creation_feedback->pPipelineStageCreationFeedbacks[0] = pipeline_feedback;
   }
 
-   pipeline->base.program.shared_consts = shader->shared_consts;
-
    pipeline->base.active_desc_sets = shader->active_desc_sets;
 
    v = shader->variant;
@@ -4334,8 +4332,6 @@ tu_compute_pipeline_create(VkDevice device,
    for (int i = 0; i < 3; i++)
       pipeline->local_size[i] = v->local_size[i];
 
-   pipeline->subgroup_size = v->info.subgroup_size;
-
    if (CHIP == A6XX) {
       tu6_emit_load_state(&pipeline->base, layout);
    }
diff --git a/src/freedreno/vulkan/tu_pipeline.h b/src/freedreno/vulkan/tu_pipeline.h
index e586e9b..174cff3 100644
--- a/src/freedreno/vulkan/tu_pipeline.h
+++ b/src/freedreno/vulkan/tu_pipeline.h
@@ -80,6 +80,25 @@ struct tu_program_descriptor_linkage
    struct tu_const_state tu_const_state;
 };
 
+struct tu_program_state
+{
+   struct tu_draw_state config_state;
+   struct tu_draw_state vs_state, vs_binning_state;
+   struct tu_draw_state hs_state;
+   struct tu_draw_state ds_state;
+   struct tu_draw_state gs_state, gs_binning_state;
+   struct tu_draw_state vpc_state;
+   struct tu_draw_state fs_state;
+
+   uint32_t hs_param_dwords;
+
+   struct tu_push_constant_range shared_consts;
+
+   struct tu_program_descriptor_linkage link[MESA_SHADER_STAGES];
+
+   bool per_view_viewport;
+};
+
 struct tu_pipeline_executable {
    gl_shader_stage stage;
@@ -137,24 +156,7 @@ struct tu_pipeline
 
    struct tu_shader *shaders[MESA_SHADER_STAGES];
 
-   struct
-   {
-      struct tu_draw_state config_state;
-      struct tu_draw_state vs_state, vs_binning_state;
-      struct tu_draw_state hs_state;
-      struct tu_draw_state ds_state;
-      struct tu_draw_state gs_state, gs_binning_state;
-      struct tu_draw_state vpc_state;
-      struct tu_draw_state fs_state;
-
-      uint32_t hs_param_dwords;
-
-      struct tu_push_constant_range shared_consts;
-
-      struct tu_program_descriptor_linkage link[MESA_SHADER_STAGES];
-
-      bool per_view_viewport;
-   } program;
+   struct tu_program_state program;
 
    struct tu_lrz_pipeline lrz;
    struct tu_bandwidth bandwidth;
@@ -207,7 +209,6 @@ struct tu_compute_pipeline {
    struct tu_pipeline base;
 
    uint32_t local_size[3];
-   uint32_t subgroup_size;
    uint32_t instrlen;
 };
 
-- 
2.7.4