radv: introduce radv_shader_layout for per-stage descriptor layout
authorSamuel Pitoiset <samuel.pitoiset@gmail.com>
Tue, 8 Aug 2023 16:37:58 +0000 (18:37 +0200)
committerMarge Bot <emma+marge@anholt.net>
Thu, 10 Aug 2023 08:51:05 +0000 (08:51 +0000)
With pipelines, the shader layout is inherited from the pipeline layout,
but with shader objects, the layout is passed through
VkShaderCreateInfoEXT.

This basically replaces uses of radv_pipeline_layout with
radv_shader_layout during shader compilation. This will avoid
creating a pipeline layout with ESO.

Signed-off-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/24562>

src/amd/vulkan/nir/radv_nir.h
src/amd/vulkan/nir/radv_nir_apply_pipeline_layout.c
src/amd/vulkan/radv_pipeline.c
src/amd/vulkan/radv_pipeline_compute.c
src/amd/vulkan/radv_pipeline_graphics.c
src/amd/vulkan/radv_pipeline_rt.c
src/amd/vulkan/radv_private.h
src/amd/vulkan/radv_rt_shader.c
src/amd/vulkan/radv_shader.h
src/amd/vulkan/radv_shader_info.c

index 155720c..279f161 100644 (file)
@@ -40,11 +40,11 @@ struct radv_pipeline_key;
 struct radv_shader_stage;
 struct radv_shader_info;
 struct radv_shader_args;
+struct radv_shader_layout;
 struct radv_device;
 
-void radv_nir_apply_pipeline_layout(nir_shader *shader, struct radv_device *device,
-                                    const struct radv_pipeline_layout *layout, const struct radv_shader_info *info,
-                                    const struct radv_shader_args *args);
+void radv_nir_apply_pipeline_layout(nir_shader *shader, struct radv_device *device, const struct radv_shader_info *info,
+                                    const struct radv_shader_args *args, const struct radv_shader_layout *layout);
 
 void radv_nir_lower_abi(nir_shader *shader, enum amd_gfx_level gfx_level, const struct radv_shader_info *info,
                         const struct radv_shader_args *args, const struct radv_pipeline_key *pl_key,
index 14ae4a6..a031862 100644 (file)
@@ -38,7 +38,7 @@ typedef struct {
 
    const struct radv_shader_args *args;
    const struct radv_shader_info *info;
-   const struct radv_pipeline_layout *pipeline_layout;
+   const struct radv_shader_layout *layout;
 } apply_layout_state;
 
 static nir_ssa_def *
@@ -73,17 +73,16 @@ visit_vulkan_resource_index(nir_builder *b, apply_layout_state *state, nir_intri
 {
    unsigned desc_set = nir_intrinsic_desc_set(intrin);
    unsigned binding = nir_intrinsic_binding(intrin);
-   struct radv_descriptor_set_layout *layout = state->pipeline_layout->set[desc_set].layout;
+   struct radv_descriptor_set_layout *layout = state->layout->set[desc_set].layout;
    unsigned offset = layout->binding[binding].offset;
    unsigned stride;
 
    nir_ssa_def *set_ptr;
    if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
        layout->binding[binding].type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
-      unsigned idx =
-         state->pipeline_layout->set[desc_set].dynamic_offset_start + layout->binding[binding].dynamic_offset_offset;
+      unsigned idx = state->layout->set[desc_set].dynamic_offset_start + layout->binding[binding].dynamic_offset_offset;
       set_ptr = get_scalar_arg(b, 1, state->args->ac.push_constants);
-      offset = state->pipeline_layout->push_constant_size + idx * 16;
+      offset = state->layout->push_constant_size + idx * 16;
       stride = 16;
    } else {
       set_ptr = load_desc_ptr(b, state, desc_set);
@@ -179,7 +178,7 @@ load_buffer_descriptor(nir_builder *b, apply_layout_state *state, nir_ssa_def *r
     * VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK.
     */
    if (binding.success) {
-      struct radv_descriptor_set_layout *layout = state->pipeline_layout->set[binding.desc_set].layout;
+      struct radv_descriptor_set_layout *layout = state->layout->set[binding.desc_set].layout;
       if (layout->binding[binding.binding].type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK) {
          rsrc = nir_iadd(b, nir_channel(b, rsrc, 0), nir_channel(b, rsrc, 1));
          return load_inline_buffer_descriptor(b, state, rsrc);
@@ -226,7 +225,7 @@ get_sampler_desc(nir_builder *b, apply_layout_state *state, nir_deref_instr *der
    unsigned binding_index = var->data.binding;
    bool indirect = nir_deref_instr_has_indirect(deref);
 
-   struct radv_descriptor_set_layout *layout = state->pipeline_layout->set[desc_set].layout;
+   struct radv_descriptor_set_layout *layout = state->layout->set[desc_set].layout;
    struct radv_descriptor_set_binding_layout *binding = &layout->binding[binding_index];
 
    /* Handle immutable and embedded (compile-time) samplers
@@ -500,9 +499,8 @@ apply_layout_to_tex(nir_builder *b, apply_layout_state *state, nir_tex_instr *te
 }
 
 void
-radv_nir_apply_pipeline_layout(nir_shader *shader, struct radv_device *device,
-                               const struct radv_pipeline_layout *layout, const struct radv_shader_info *info,
-                               const struct radv_shader_args *args)
+radv_nir_apply_pipeline_layout(nir_shader *shader, struct radv_device *device, const struct radv_shader_info *info,
+                               const struct radv_shader_args *args, const struct radv_shader_layout *layout)
 {
    apply_layout_state state = {
       .gfx_level = device->physical_device->rad_info.gfx_level,
@@ -512,7 +510,7 @@ radv_nir_apply_pipeline_layout(nir_shader *shader, struct radv_device *device,
       .conformant_trunc_coord = device->physical_device->rad_info.conformant_trunc_coord,
       .args = args,
       .info = info,
-      .pipeline_layout = layout,
+      .layout = layout,
    };
 
    nir_builder b;
index 9d69832..ea45ec7 100644 (file)
@@ -286,10 +286,28 @@ radv_shader_stage_init(const VkPipelineShaderStageCreateInfo *sinfo, struct radv
    vk_pipeline_hash_shader_stage(sinfo, NULL, out_stage->shader_sha1);
 }
 
+void
+radv_shader_layout_init(const struct radv_pipeline_layout *pipeline_layout, gl_shader_stage stage,
+                        struct radv_shader_layout *layout)
+{
+   layout->num_sets = pipeline_layout->num_sets;
+   for (unsigned i = 0; i < pipeline_layout->num_sets; i++) {
+      layout->set[i].layout = pipeline_layout->set[i].layout;
+      layout->set[i].dynamic_offset_start = pipeline_layout->set[i].dynamic_offset_start;
+   }
+
+   layout->push_constant_size = pipeline_layout->push_constant_size;
+
+   if (pipeline_layout->dynamic_offset_count &&
+       (pipeline_layout->dynamic_shader_stages & mesa_to_vk_shader_stage(stage))) {
+      layout->use_dynamic_descriptors = true;
+   }
+}
+
 static const struct vk_ycbcr_conversion_state *
 ycbcr_conversion_lookup(const void *data, uint32_t set, uint32_t binding, uint32_t array_index)
 {
-   const struct radv_pipeline_layout *layout = data;
+   const struct radv_shader_layout *layout = data;
 
    const struct radv_descriptor_set_layout *set_layout = layout->set[set].layout;
    const struct vk_ycbcr_conversion_state *ycbcr_samplers = radv_immutable_ycbcr_samplers(set_layout, binding);
@@ -490,8 +508,8 @@ non_uniform_access_callback(const nir_src *src, void *_)
 }
 
 void
-radv_postprocess_nir(struct radv_device *device, const struct radv_pipeline_layout *pipeline_layout,
-                     const struct radv_pipeline_key *pipeline_key, struct radv_shader_stage *stage)
+radv_postprocess_nir(struct radv_device *device, const struct radv_pipeline_key *pipeline_key,
+                     struct radv_shader_stage *stage)
 {
    enum amd_gfx_level gfx_level = device->physical_device->rad_info.gfx_level;
    bool progress;
@@ -564,7 +582,7 @@ radv_postprocess_nir(struct radv_device *device, const struct radv_pipeline_layo
                                             .modes_N_comps = nir_var_mem_ubo | nir_var_mem_ssbo});
 
    progress = false;
-   NIR_PASS(progress, stage->nir, nir_vk_lower_ycbcr_tex, ycbcr_conversion_lookup, pipeline_layout);
+   NIR_PASS(progress, stage->nir, nir_vk_lower_ycbcr_tex, ycbcr_conversion_lookup, &stage->layout);
    /* Gather info in the case that nir_vk_lower_ycbcr_tex might have emitted resinfo instructions. */
    if (progress)
       nir_shader_gather_info(stage->nir, nir_shader_get_entrypoint(stage->nir));
@@ -588,7 +606,7 @@ radv_postprocess_nir(struct radv_device *device, const struct radv_pipeline_layo
    if (stage->nir->info.uses_resource_info_query)
       NIR_PASS(_, stage->nir, ac_nir_lower_resinfo, gfx_level);
 
-   NIR_PASS_V(stage->nir, radv_nir_apply_pipeline_layout, device, pipeline_layout, &stage->info, &stage->args);
+   NIR_PASS_V(stage->nir, radv_nir_apply_pipeline_layout, device, &stage->info, &stage->args, &stage->layout);
 
    if (!pipeline_key->optimisations_disabled) {
       NIR_PASS(_, stage->nir, nir_opt_shrink_vectors);
index 3aab46f..1939c4d 100644 (file)
@@ -128,9 +128,8 @@ radv_compute_pipeline_init(const struct radv_device *device, struct radv_compute
 
 static struct radv_shader *
 radv_compile_cs(struct radv_device *device, struct vk_pipeline_cache *cache, struct radv_shader_stage *cs_stage,
-                const struct radv_pipeline_key *pipeline_key, struct radv_pipeline_layout *pipeline_layout,
-                bool keep_executable_info, bool keep_statistic_info, bool is_internal,
-                struct radv_shader_binary **cs_binary)
+                const struct radv_pipeline_key *pipeline_key, bool keep_executable_info, bool keep_statistic_info,
+                bool is_internal, struct radv_shader_binary **cs_binary)
 {
    struct radv_shader *cs_shader;
 
@@ -144,7 +143,7 @@ radv_compile_cs(struct radv_device *device, struct vk_pipeline_cache *cache, str
 
    /* Run the shader info pass. */
    radv_nir_shader_info_init(cs_stage->stage, MESA_SHADER_NONE, &cs_stage->info);
-   radv_nir_shader_info_pass(device, cs_stage->nir, pipeline_layout, pipeline_key, RADV_PIPELINE_COMPUTE, false,
+   radv_nir_shader_info_pass(device, cs_stage->nir, &cs_stage->layout, pipeline_key, RADV_PIPELINE_COMPUTE, false,
                              &cs_stage->info);
 
    radv_declare_shader_args(device, pipeline_key, &cs_stage->info, MESA_SHADER_COMPUTE, MESA_SHADER_NONE,
@@ -154,7 +153,7 @@ radv_compile_cs(struct radv_device *device, struct vk_pipeline_cache *cache, str
    cs_stage->info.inline_push_constant_mask = cs_stage->args.ac.inline_push_const_mask;
 
    /* Postprocess NIR. */
-   radv_postprocess_nir(device, pipeline_layout, pipeline_key, cs_stage);
+   radv_postprocess_nir(device, pipeline_key, cs_stage);
 
    if (radv_can_dump_shader(device, cs_stage->nir, false))
       nir_print_shader(cs_stage->nir, stderr);
@@ -198,6 +197,7 @@ radv_compute_pipeline_compile(struct radv_compute_pipeline *pipeline, struct rad
    int64_t pipeline_start = os_time_get_nano();
 
    radv_shader_stage_init(pStage, &cs_stage, MESA_SHADER_COMPUTE);
+   radv_shader_layout_init(pipeline_layout, MESA_SHADER_COMPUTE, &cs_stage.layout);
 
    radv_hash_shaders(hash, &cs_stage, 1, pipeline_layout, pipeline_key,
                      radv_get_hash_flags(device, keep_statistic_info));
@@ -219,8 +219,8 @@ radv_compute_pipeline_compile(struct radv_compute_pipeline *pipeline, struct rad
    int64_t stage_start = os_time_get_nano();
 
    pipeline->base.shaders[MESA_SHADER_COMPUTE] =
-      radv_compile_cs(device, cache, &cs_stage, pipeline_key, pipeline_layout, keep_executable_info,
-                      keep_statistic_info, pipeline->base.is_internal, &cs_binary);
+      radv_compile_cs(device, cache, &cs_stage, pipeline_key, keep_executable_info, keep_statistic_info,
+                      pipeline->base.is_internal, &cs_binary);
 
    cs_stage.feedback.duration += os_time_get_nano() - stage_start;
 
index 9bb6be7..2f3ef2d 100644 (file)
@@ -2125,8 +2125,8 @@ radv_get_next_stage(gl_shader_stage stage, VkShaderStageFlagBits active_nir_stag
 
 static void
 radv_fill_shader_info(struct radv_device *device, const enum radv_pipeline_type pipeline_type,
-                      struct radv_pipeline_layout *pipeline_layout, const struct radv_pipeline_key *pipeline_key,
-                      struct radv_shader_stage *stages, VkShaderStageFlagBits active_nir_stages)
+                      const struct radv_pipeline_key *pipeline_key, struct radv_shader_stage *stages,
+                      VkShaderStageFlagBits active_nir_stages)
 {
    radv_foreach_stage(i, active_nir_stages)
    {
@@ -2136,8 +2136,8 @@ radv_fill_shader_info(struct radv_device *device, const enum radv_pipeline_type
          consider_force_vrs = radv_consider_force_vrs(device, &stages[i], &stages[MESA_SHADER_FRAGMENT]);
       }
 
-      radv_nir_shader_info_pass(device, stages[i].nir, pipeline_layout, pipeline_key, pipeline_type, consider_force_vrs,
-                                &stages[i].info);
+      radv_nir_shader_info_pass(device, stages[i].nir, &stages[i].layout, pipeline_key, pipeline_type,
+                                consider_force_vrs, &stages[i].info);
    }
 
    radv_nir_shader_info_link(device, pipeline_key, stages);
@@ -2190,8 +2190,8 @@ radv_declare_pipeline_args(struct radv_device *device, struct radv_shader_stage
 static struct radv_shader *
 radv_create_gs_copy_shader(struct radv_device *device, struct vk_pipeline_cache *cache,
                            struct radv_shader_stage *gs_stage, const struct radv_pipeline_key *pipeline_key,
-                           const struct radv_pipeline_layout *pipeline_layout, bool keep_executable_info,
-                           bool keep_statistic_info, struct radv_shader_binary **gs_copy_binary)
+                           bool keep_executable_info, bool keep_statistic_info,
+                           struct radv_shader_binary **gs_copy_binary)
 {
    const struct radv_shader_info *gs_info = &gs_stage->info;
    ac_nir_gs_output_info output_info = {
@@ -2211,7 +2211,7 @@ radv_create_gs_copy_shader(struct radv_device *device, struct vk_pipeline_cache
       .shader_sha1 = {0},
    };
    radv_nir_shader_info_init(gs_copy_stage.stage, MESA_SHADER_FRAGMENT, &gs_copy_stage.info);
-   radv_nir_shader_info_pass(device, nir, pipeline_layout, pipeline_key, RADV_PIPELINE_GRAPHICS, false,
+   radv_nir_shader_info_pass(device, nir, &gs_stage->layout, pipeline_key, RADV_PIPELINE_GRAPHICS, false,
                              &gs_copy_stage.info);
    gs_copy_stage.info.wave_size = 64;      /* Wave32 not supported. */
    gs_copy_stage.info.workgroup_size = 64; /* HW VS: separate waves, no workgroups */
@@ -2248,10 +2248,10 @@ radv_create_gs_copy_shader(struct radv_device *device, struct vk_pipeline_cache
 static void
 radv_graphics_shaders_nir_to_asm(struct radv_device *device, struct vk_pipeline_cache *cache,
                                  struct radv_shader_stage *stages, const struct radv_pipeline_key *pipeline_key,
-                                 const struct radv_pipeline_layout *pipeline_layout, bool keep_executable_info,
-                                 bool keep_statistic_info, VkShaderStageFlagBits active_nir_stages,
-                                 struct radv_shader **shaders, struct radv_shader_binary **binaries,
-                                 struct radv_shader **gs_copy_shader, struct radv_shader_binary **gs_copy_binary)
+                                 bool keep_executable_info, bool keep_statistic_info,
+                                 VkShaderStageFlagBits active_nir_stages, struct radv_shader **shaders,
+                                 struct radv_shader_binary **binaries, struct radv_shader **gs_copy_shader,
+                                 struct radv_shader_binary **gs_copy_binary)
 {
    for (int s = MESA_VULKAN_SHADER_STAGES - 1; s >= 0; s--) {
       if (!(active_nir_stages & (1 << s)))
@@ -2287,9 +2287,8 @@ radv_graphics_shaders_nir_to_asm(struct radv_device *device, struct vk_pipeline_
                                       &stages[s].info);
 
       if (s == MESA_SHADER_GEOMETRY && !stages[s].info.is_ngg) {
-         *gs_copy_shader =
-            radv_create_gs_copy_shader(device, cache, &stages[MESA_SHADER_GEOMETRY], pipeline_key, pipeline_layout,
-                                       keep_executable_info, keep_statistic_info, gs_copy_binary);
+         *gs_copy_shader = radv_create_gs_copy_shader(device, cache, &stages[MESA_SHADER_GEOMETRY], pipeline_key,
+                                                      keep_executable_info, keep_statistic_info, gs_copy_binary);
       }
 
       stages[s].feedback.duration += os_time_get_nano() - stage_start;
@@ -2339,6 +2338,7 @@ radv_pipeline_import_retained_shaders(const struct radv_device *device, struct r
          continue;
 
       radv_shader_stage_init(sinfo, &stages[s], s);
+      radv_shader_layout_init(&lib->layout, s, &stages[s].layout);
    }
 
    /* Import the NIR shaders (after SPIRV->NIR). */
@@ -2359,6 +2359,8 @@ radv_pipeline_import_retained_shaders(const struct radv_device *device, struct r
       stages[s].entrypoint = nir_shader_get_entrypoint(stages[s].nir)->function->name;
       memcpy(stages[s].shader_sha1, retained_shaders->stages[s].shader_sha1, sizeof(stages[s].shader_sha1));
 
+      radv_shader_layout_init(&lib->layout, s, &stages[s].layout);
+
       stages[s].feedback.flags |= VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT;
 
       stages[s].feedback.duration += os_time_get_nano() - stage_start;
@@ -2496,8 +2498,7 @@ radv_skip_graphics_pipeline_compile(const struct radv_device *device, const stru
 static void
 radv_graphics_shaders_compile(struct radv_device *device, struct vk_pipeline_cache *cache,
                               struct radv_shader_stage *stages, const struct radv_pipeline_key *pipeline_key,
-                              struct radv_pipeline_layout *pipeline_layout, bool keep_executable_info,
-                              bool keep_statistic_info, bool is_internal,
+                              bool keep_executable_info, bool keep_statistic_info, bool is_internal,
                               struct radv_retained_shaders *retained_shaders, bool noop_fs,
                               struct radv_shader **shaders, struct radv_shader_binary **binaries,
                               struct radv_shader **gs_copy_shader, struct radv_shader_binary **gs_copy_binary)
@@ -2586,7 +2587,7 @@ radv_graphics_shaders_compile(struct radv_device *device, struct vk_pipeline_cac
       radv_nir_lower_poly_line_smooth(stages[MESA_SHADER_FRAGMENT].nir, pipeline_key);
    }
 
-   radv_fill_shader_info(device, RADV_PIPELINE_GRAPHICS, pipeline_layout, pipeline_key, stages, active_nir_stages);
+   radv_fill_shader_info(device, RADV_PIPELINE_GRAPHICS, pipeline_key, stages, active_nir_stages);
 
    radv_declare_pipeline_args(device, stages, pipeline_key, active_nir_stages);
 
@@ -2594,7 +2595,7 @@ radv_graphics_shaders_compile(struct radv_device *device, struct vk_pipeline_cac
    {
       int64_t stage_start = os_time_get_nano();
 
-      radv_postprocess_nir(device, pipeline_layout, pipeline_key, &stages[i]);
+      radv_postprocess_nir(device, pipeline_key, &stages[i]);
 
       stages[i].feedback.duration += os_time_get_nano() - stage_start;
 
@@ -2603,9 +2604,8 @@ radv_graphics_shaders_compile(struct radv_device *device, struct vk_pipeline_cac
    }
 
    /* Compile NIR shaders to AMD assembly. */
-   radv_graphics_shaders_nir_to_asm(device, cache, stages, pipeline_key, pipeline_layout, keep_executable_info,
-                                    keep_statistic_info, active_nir_stages, shaders, binaries, gs_copy_shader,
-                                    gs_copy_binary);
+   radv_graphics_shaders_nir_to_asm(device, cache, stages, pipeline_key, keep_executable_info, keep_statistic_info,
+                                    active_nir_stages, shaders, binaries, gs_copy_shader, gs_copy_binary);
 
    if (keep_executable_info) {
       for (int i = 0; i < MESA_VULKAN_SHADER_STAGES; ++i) {
@@ -2664,6 +2664,7 @@ radv_graphics_pipeline_compile(struct radv_graphics_pipeline *pipeline, const Vk
          continue;
 
       radv_shader_stage_init(sinfo, &stages[stage], stage);
+      radv_shader_layout_init(pipeline_layout, stage, &stages[stage].layout);
    }
 
    radv_pipeline_load_retained_shaders(device, pipeline, pCreateInfo, stages);
@@ -2727,9 +2728,9 @@ radv_graphics_pipeline_compile(struct radv_graphics_pipeline *pipeline, const Vk
 
    const bool noop_fs = radv_pipeline_needs_noop_fs(pipeline, pipeline_key);
 
-   radv_graphics_shaders_compile(device, cache, stages, pipeline_key, pipeline_layout, keep_executable_info,
-                                 keep_statistic_info, pipeline->base.is_internal, retained_shaders, noop_fs,
-                                 pipeline->base.shaders, binaries, &pipeline->base.gs_copy_shader, &gs_copy_binary);
+   radv_graphics_shaders_compile(device, cache, stages, pipeline_key, keep_executable_info, keep_statistic_info,
+                                 pipeline->base.is_internal, retained_shaders, noop_fs, pipeline->base.shaders,
+                                 binaries, &pipeline->base.gs_copy_shader, &gs_copy_binary);
 
    if (!radv_pipeline_create_ps_epilog(device, pipeline, pipeline_key, lib_flags, &ps_epilog_binary))
       return VK_ERROR_OUT_OF_DEVICE_MEMORY;
index 54cb6e5..e0cf423 100644 (file)
@@ -254,12 +254,14 @@ static void
 radv_rt_fill_stage_info(struct radv_device *device, const VkRayTracingPipelineCreateInfoKHR *pCreateInfo,
                         struct radv_ray_tracing_stage *stages, struct radv_pipeline_key *key)
 {
+   RADV_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, pCreateInfo->layout);
    uint32_t idx;
    for (idx = 0; idx < pCreateInfo->stageCount; idx++) {
       stages[idx].stage = vk_to_mesa_shader_stage(pCreateInfo->pStages[idx].stage);
 
       struct radv_shader_stage stage;
       radv_shader_stage_init(&pCreateInfo->pStages[idx], &stage, stages[idx].stage);
+      radv_shader_layout_init(pipeline_layout, stages[idx].stage, &stage.layout);
 
       radv_hash_shaders(stages[idx].sha1, &stage, 1, NULL, key, radv_get_hash_flags(device, false));
    }
@@ -350,14 +352,13 @@ radv_rt_nir_to_asm(struct radv_device *device, struct vk_pipeline_cache *cache,
                    struct radv_shader **out_shader)
 {
    struct radv_shader_binary *binary;
-   RADV_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, pCreateInfo->layout);
    bool keep_executable_info = radv_pipeline_capture_shaders(device, pipeline->base.base.create_flags);
    bool keep_statistic_info = radv_pipeline_capture_shader_stats(device, pipeline->base.base.create_flags);
 
    /* Gather shader info. */
    nir_shader_gather_info(stage->nir, nir_shader_get_entrypoint(stage->nir));
    radv_nir_shader_info_init(stage->stage, MESA_SHADER_NONE, &stage->info);
-   radv_nir_shader_info_pass(device, stage->nir, pipeline_layout, pipeline_key, RADV_PIPELINE_RAY_TRACING, false,
+   radv_nir_shader_info_pass(device, stage->nir, &stage->layout, pipeline_key, RADV_PIPELINE_RAY_TRACING, false,
                              &stage->info);
 
    /* Declare shader arguments. */
@@ -395,7 +396,7 @@ radv_rt_nir_to_asm(struct radv_device *device, struct vk_pipeline_cache *cache,
       temp_stage.nir = shaders[i];
       radv_nir_lower_rt_abi(temp_stage.nir, pCreateInfo, &temp_stage.args, &stage->info, stack_size, i > 0);
       radv_optimize_nir(temp_stage.nir, pipeline_key->optimisations_disabled);
-      radv_postprocess_nir(device, pipeline_layout, pipeline_key, &temp_stage);
+      radv_postprocess_nir(device, pipeline_key, &temp_stage);
 
       if (radv_can_dump_shader(device, temp_stage.nir, false))
          nir_print_shader(temp_stage.nir, stderr);
@@ -441,6 +442,8 @@ radv_rt_compile_shaders(struct radv_device *device, struct vk_pipeline_cache *ca
                         const struct radv_pipeline_key *key, struct radv_ray_tracing_pipeline *pipeline,
                         struct radv_serialized_shader_arena_block *capture_replay_handles)
 {
+   RADV_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, pCreateInfo->layout);
+
    if (pipeline->base.base.create_flags & VK_PIPELINE_CREATE_2_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_KHR)
       return VK_PIPELINE_COMPILE_REQUIRED;
    VkResult result = VK_SUCCESS;
@@ -451,12 +454,13 @@ radv_rt_compile_shaders(struct radv_device *device, struct vk_pipeline_cache *ca
       int64_t stage_start = os_time_get_nano();
       struct radv_shader_stage stage;
       radv_shader_stage_init(&pCreateInfo->pStages[idx], &stage, stages[idx].stage);
+      radv_shader_layout_init(pipeline_layout, stages[idx].stage, &stage.layout);
 
       if (stages[idx].shader)
          goto feedback;
 
       /* precompile the shader */
-      stage.nir = radv_parse_rt_stage(device, &pCreateInfo->pStages[idx], key);
+      stage.nir = radv_parse_rt_stage(device, &pCreateInfo->pStages[idx], key, pipeline_layout);
 
       if (radv_ray_tracing_stage_is_compiled(&stages[idx])) {
          uint32_t stack_size = 0;
@@ -506,6 +510,7 @@ radv_rt_compile_shaders(struct radv_device *device, struct vk_pipeline_cache *ca
       .nir = traversal_module.nir,
    };
    vk_pipeline_hash_shader_stage(&pStage, NULL, traversal_stage.shader_sha1);
+   radv_shader_layout_init(pipeline_layout, MESA_SHADER_INTERSECTION, &traversal_stage.layout);
    result = radv_rt_nir_to_asm(device, cache, pCreateInfo, key, pipeline, &traversal_stage, NULL, NULL,
                                &pipeline->base.base.shaders[MESA_SHADER_INTERSECTION]);
 
index 29e053b..423394c 100644 (file)
@@ -2434,6 +2434,18 @@ RADV_DECL_PIPELINE_DOWNCAST(graphics_lib, RADV_PIPELINE_GRAPHICS_LIB)
 RADV_DECL_PIPELINE_DOWNCAST(compute, RADV_PIPELINE_COMPUTE)
 RADV_DECL_PIPELINE_DOWNCAST(ray_tracing, RADV_PIPELINE_RAY_TRACING)
 
+struct radv_shader_layout {
+   uint32_t num_sets;
+
+   struct {
+      struct radv_descriptor_set_layout *layout;
+      uint32_t dynamic_offset_start;
+   } set[MAX_SETS];
+
+   uint32_t push_constant_size;
+   bool use_dynamic_descriptors;
+};
+
 struct radv_shader_stage {
    gl_shader_stage stage;
 
@@ -2455,8 +2467,13 @@ struct radv_shader_stage {
    struct radv_shader_args args;
 
    VkPipelineCreationFeedback feedback;
+
+   struct radv_shader_layout layout;
 };
 
+void radv_shader_layout_init(const struct radv_pipeline_layout *pipeline_layout, gl_shader_stage stage,
+                             struct radv_shader_layout *layout);
+
 static inline bool
 radv_is_last_vgt_stage(const struct radv_shader_stage *stage)
 {
@@ -3075,7 +3092,7 @@ void llvm_compile_shader(const struct radv_nir_compiler_options *options, const
 struct radv_shader_info;
 
 void radv_nir_shader_info_pass(struct radv_device *device, const struct nir_shader *nir,
-                               const struct radv_pipeline_layout *layout, const struct radv_pipeline_key *pipeline_key,
+                               const struct radv_shader_layout *layout, const struct radv_pipeline_key *pipeline_key,
                                const enum radv_pipeline_type pipeline_type, bool consider_force_vrs,
                                struct radv_shader_info *info);
 
index 3ef8a88..be939c3 100644 (file)
@@ -811,11 +811,12 @@ insert_rt_case(nir_builder *b, nir_shader *shader, struct rt_variables *vars, ni
 
 nir_shader *
 radv_parse_rt_stage(struct radv_device *device, const VkPipelineShaderStageCreateInfo *sinfo,
-                    const struct radv_pipeline_key *key)
+                    const struct radv_pipeline_key *key, const struct radv_pipeline_layout *pipeline_layout)
 {
    struct radv_shader_stage rt_stage;
 
    radv_shader_stage_init(sinfo, &rt_stage, vk_to_mesa_shader_stage(sinfo->stage));
+   radv_shader_layout_init(pipeline_layout, vk_to_mesa_shader_stage(sinfo->stage), &rt_stage.layout);
 
    nir_shader *shader = radv_shader_spirv_to_nir(device, &rt_stage, key, false);
 
index 6b02b14..042cab6 100644 (file)
@@ -616,11 +616,12 @@ struct radv_shader_stage;
 void radv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively);
 void radv_optimize_nir_algebraic(nir_shader *shader, bool opt_offsets);
 
-void radv_postprocess_nir(struct radv_device *device, const struct radv_pipeline_layout *pipeline_layout,
-                          const struct radv_pipeline_key *pipeline_key, struct radv_shader_stage *stage);
+void radv_postprocess_nir(struct radv_device *device, const struct radv_pipeline_key *pipeline_key,
+                          struct radv_shader_stage *stage);
 
 nir_shader *radv_parse_rt_stage(struct radv_device *device, const VkPipelineShaderStageCreateInfo *sinfo,
-                                const struct radv_pipeline_key *key);
+                                const struct radv_pipeline_key *key,
+                                const struct radv_pipeline_layout *pipeline_layout);
 
 void radv_nir_lower_rt_abi(nir_shader *shader, const VkRayTracingPipelineCreateInfoKHR *pCreateInfo,
                            const struct radv_shader_args *args, const struct radv_shader_info *info,
index 9a5f437..ea7e2ef 100644 (file)
@@ -1025,13 +1025,13 @@ radv_nir_shader_info_init(gl_shader_stage stage, gl_shader_stage next_stage, str
 
 void
 radv_nir_shader_info_pass(struct radv_device *device, const struct nir_shader *nir,
-                          const struct radv_pipeline_layout *layout, const struct radv_pipeline_key *pipeline_key,
+                          const struct radv_shader_layout *layout, const struct radv_pipeline_key *pipeline_key,
                           const enum radv_pipeline_type pipeline_type, bool consider_force_vrs,
                           struct radv_shader_info *info)
 {
    struct nir_function *func = (struct nir_function *)exec_list_get_head_const(&nir->functions);
 
-   if (layout->dynamic_offset_count && (layout->dynamic_shader_stages & mesa_to_vk_shader_stage(nir->info.stage))) {
+   if (layout->use_dynamic_descriptors) {
       info->loads_push_constants = true;
       info->loads_dynamic_offsets = true;
    }