struct radv_shader_stage;
struct radv_shader_info;
struct radv_shader_args;
+struct radv_shader_layout;
struct radv_device;
-void radv_nir_apply_pipeline_layout(nir_shader *shader, struct radv_device *device,
- const struct radv_pipeline_layout *layout, const struct radv_shader_info *info,
- const struct radv_shader_args *args);
+void radv_nir_apply_pipeline_layout(nir_shader *shader, struct radv_device *device, const struct radv_shader_info *info,
+ const struct radv_shader_args *args, const struct radv_shader_layout *layout);
void radv_nir_lower_abi(nir_shader *shader, enum amd_gfx_level gfx_level, const struct radv_shader_info *info,
const struct radv_shader_args *args, const struct radv_pipeline_key *pl_key,
const struct radv_shader_args *args;
const struct radv_shader_info *info;
- const struct radv_pipeline_layout *pipeline_layout;
+ const struct radv_shader_layout *layout;
} apply_layout_state;
static nir_ssa_def *
visit_vulkan_resource_index(nir_builder *b, apply_layout_state *state, nir_intrinsic_instr *intrin)
{
unsigned desc_set = nir_intrinsic_desc_set(intrin);
unsigned binding = nir_intrinsic_binding(intrin);
- struct radv_descriptor_set_layout *layout = state->pipeline_layout->set[desc_set].layout;
+ struct radv_descriptor_set_layout *layout = state->layout->set[desc_set].layout;
unsigned offset = layout->binding[binding].offset;
unsigned stride;
nir_ssa_def *set_ptr;
if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
layout->binding[binding].type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
- unsigned idx =
- state->pipeline_layout->set[desc_set].dynamic_offset_start + layout->binding[binding].dynamic_offset_offset;
+ unsigned idx = state->layout->set[desc_set].dynamic_offset_start + layout->binding[binding].dynamic_offset_offset;
set_ptr = get_scalar_arg(b, 1, state->args->ac.push_constants);
- offset = state->pipeline_layout->push_constant_size + idx * 16;
+ offset = state->layout->push_constant_size + idx * 16;
stride = 16;
} else {
set_ptr = load_desc_ptr(b, state, desc_set);
* VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK.
*/
if (binding.success) {
- struct radv_descriptor_set_layout *layout = state->pipeline_layout->set[binding.desc_set].layout;
+ struct radv_descriptor_set_layout *layout = state->layout->set[binding.desc_set].layout;
if (layout->binding[binding.binding].type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK) {
rsrc = nir_iadd(b, nir_channel(b, rsrc, 0), nir_channel(b, rsrc, 1));
return load_inline_buffer_descriptor(b, state, rsrc);
unsigned binding_index = var->data.binding;
bool indirect = nir_deref_instr_has_indirect(deref);
- struct radv_descriptor_set_layout *layout = state->pipeline_layout->set[desc_set].layout;
+ struct radv_descriptor_set_layout *layout = state->layout->set[desc_set].layout;
struct radv_descriptor_set_binding_layout *binding = &layout->binding[binding_index];
/* Handle immutable and embedded (compile-time) samplers
}
void
-radv_nir_apply_pipeline_layout(nir_shader *shader, struct radv_device *device,
- const struct radv_pipeline_layout *layout, const struct radv_shader_info *info,
- const struct radv_shader_args *args)
+radv_nir_apply_pipeline_layout(nir_shader *shader, struct radv_device *device, const struct radv_shader_info *info,
+ const struct radv_shader_args *args, const struct radv_shader_layout *layout)
{
apply_layout_state state = {
.gfx_level = device->physical_device->rad_info.gfx_level,
.conformant_trunc_coord = device->physical_device->rad_info.conformant_trunc_coord,
.args = args,
.info = info,
- .pipeline_layout = layout,
+ .layout = layout,
};
nir_builder b;
vk_pipeline_hash_shader_stage(sinfo, NULL, out_stage->shader_sha1);
}
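+/* Snapshot the subset of the pipeline layout that shader compilation needs
+ * (per-set layouts, dynamic offset bases, push constant size), so the NIR
+ * passes can work from the stage instead of a full radv_pipeline_layout.
+ */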
+void
+radv_shader_layout_init(const struct radv_pipeline_layout *pipeline_layout, gl_shader_stage stage,
+ struct radv_shader_layout *layout)
+{
+ layout->num_sets = pipeline_layout->num_sets;
+ for (unsigned i = 0; i < pipeline_layout->num_sets; i++) {
+ layout->set[i].layout = pipeline_layout->set[i].layout;
+ layout->set[i].dynamic_offset_start = pipeline_layout->set[i].dynamic_offset_start;
+ }
+
+ layout->push_constant_size = pipeline_layout->push_constant_size;
+
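+ /* Dynamic buffer descriptors are loaded through the push constants
+  * pointer, so record whether this stage needs that path.
+  */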
+ if (pipeline_layout->dynamic_offset_count &&
+ (pipeline_layout->dynamic_shader_stages & mesa_to_vk_shader_stage(stage))) {
+ layout->use_dynamic_descriptors = true;
+ }
+}
+
static const struct vk_ycbcr_conversion_state *
ycbcr_conversion_lookup(const void *data, uint32_t set, uint32_t binding, uint32_t array_index)
{
- const struct radv_pipeline_layout *layout = data;
+ const struct radv_shader_layout *layout = data;
const struct radv_descriptor_set_layout *set_layout = layout->set[set].layout;
const struct vk_ycbcr_conversion_state *ycbcr_samplers = radv_immutable_ycbcr_samplers(set_layout, binding);
}
void
-radv_postprocess_nir(struct radv_device *device, const struct radv_pipeline_layout *pipeline_layout,
- const struct radv_pipeline_key *pipeline_key, struct radv_shader_stage *stage)
+radv_postprocess_nir(struct radv_device *device, const struct radv_pipeline_key *pipeline_key,
+ struct radv_shader_stage *stage)
{
enum amd_gfx_level gfx_level = device->physical_device->rad_info.gfx_level;
bool progress;
.modes_N_comps = nir_var_mem_ubo | nir_var_mem_ssbo});
progress = false;
- NIR_PASS(progress, stage->nir, nir_vk_lower_ycbcr_tex, ycbcr_conversion_lookup, pipeline_layout);
+ NIR_PASS(progress, stage->nir, nir_vk_lower_ycbcr_tex, ycbcr_conversion_lookup, &stage->layout);
/* Gather info in the case that nir_vk_lower_ycbcr_tex might have emitted resinfo instructions. */
if (progress)
nir_shader_gather_info(stage->nir, nir_shader_get_entrypoint(stage->nir));
if (stage->nir->info.uses_resource_info_query)
NIR_PASS(_, stage->nir, ac_nir_lower_resinfo, gfx_level);
- NIR_PASS_V(stage->nir, radv_nir_apply_pipeline_layout, device, pipeline_layout, &stage->info, &stage->args);
+ NIR_PASS_V(stage->nir, radv_nir_apply_pipeline_layout, device, &stage->info, &stage->args, &stage->layout);
if (!pipeline_key->optimisations_disabled) {
NIR_PASS(_, stage->nir, nir_opt_shrink_vectors);
static struct radv_shader *
radv_compile_cs(struct radv_device *device, struct vk_pipeline_cache *cache, struct radv_shader_stage *cs_stage,
- const struct radv_pipeline_key *pipeline_key, struct radv_pipeline_layout *pipeline_layout,
- bool keep_executable_info, bool keep_statistic_info, bool is_internal,
- struct radv_shader_binary **cs_binary)
+ const struct radv_pipeline_key *pipeline_key, bool keep_executable_info, bool keep_statistic_info,
+ bool is_internal, struct radv_shader_binary **cs_binary)
{
struct radv_shader *cs_shader;
/* Run the shader info pass. */
radv_nir_shader_info_init(cs_stage->stage, MESA_SHADER_NONE, &cs_stage->info);
- radv_nir_shader_info_pass(device, cs_stage->nir, pipeline_layout, pipeline_key, RADV_PIPELINE_COMPUTE, false,
+ radv_nir_shader_info_pass(device, cs_stage->nir, &cs_stage->layout, pipeline_key, RADV_PIPELINE_COMPUTE, false,
&cs_stage->info);
radv_declare_shader_args(device, pipeline_key, &cs_stage->info, MESA_SHADER_COMPUTE, MESA_SHADER_NONE,
cs_stage->info.inline_push_constant_mask = cs_stage->args.ac.inline_push_const_mask;
/* Postprocess NIR. */
- radv_postprocess_nir(device, pipeline_layout, pipeline_key, cs_stage);
+ radv_postprocess_nir(device, pipeline_key, cs_stage);
if (radv_can_dump_shader(device, cs_stage->nir, false))
nir_print_shader(cs_stage->nir, stderr);
int64_t pipeline_start = os_time_get_nano();
radv_shader_stage_init(pStage, &cs_stage, MESA_SHADER_COMPUTE);
+ radv_shader_layout_init(pipeline_layout, MESA_SHADER_COMPUTE, &cs_stage.layout);
radv_hash_shaders(hash, &cs_stage, 1, pipeline_layout, pipeline_key,
radv_get_hash_flags(device, keep_statistic_info));
int64_t stage_start = os_time_get_nano();
pipeline->base.shaders[MESA_SHADER_COMPUTE] =
- radv_compile_cs(device, cache, &cs_stage, pipeline_key, pipeline_layout, keep_executable_info,
- keep_statistic_info, pipeline->base.is_internal, &cs_binary);
+ radv_compile_cs(device, cache, &cs_stage, pipeline_key, keep_executable_info, keep_statistic_info,
+ pipeline->base.is_internal, &cs_binary);
cs_stage.feedback.duration += os_time_get_nano() - stage_start;
static void
radv_fill_shader_info(struct radv_device *device, const enum radv_pipeline_type pipeline_type,
- struct radv_pipeline_layout *pipeline_layout, const struct radv_pipeline_key *pipeline_key,
- struct radv_shader_stage *stages, VkShaderStageFlagBits active_nir_stages)
+ const struct radv_pipeline_key *pipeline_key, struct radv_shader_stage *stages,
+ VkShaderStageFlagBits active_nir_stages)
{
radv_foreach_stage(i, active_nir_stages)
{
consider_force_vrs = radv_consider_force_vrs(device, &stages[i], &stages[MESA_SHADER_FRAGMENT]);
}
- radv_nir_shader_info_pass(device, stages[i].nir, pipeline_layout, pipeline_key, pipeline_type, consider_force_vrs,
- &stages[i].info);
+ radv_nir_shader_info_pass(device, stages[i].nir, &stages[i].layout, pipeline_key, pipeline_type,
+ consider_force_vrs, &stages[i].info);
}
radv_nir_shader_info_link(device, pipeline_key, stages);
static struct radv_shader *
radv_create_gs_copy_shader(struct radv_device *device, struct vk_pipeline_cache *cache,
struct radv_shader_stage *gs_stage, const struct radv_pipeline_key *pipeline_key,
- const struct radv_pipeline_layout *pipeline_layout, bool keep_executable_info,
- bool keep_statistic_info, struct radv_shader_binary **gs_copy_binary)
+ bool keep_executable_info, bool keep_statistic_info,
+ struct radv_shader_binary **gs_copy_binary)
{
const struct radv_shader_info *gs_info = &gs_stage->info;
ac_nir_gs_output_info output_info = {
.shader_sha1 = {0},
};
radv_nir_shader_info_init(gs_copy_stage.stage, MESA_SHADER_FRAGMENT, &gs_copy_stage.info);
- radv_nir_shader_info_pass(device, nir, pipeline_layout, pipeline_key, RADV_PIPELINE_GRAPHICS, false,
+ radv_nir_shader_info_pass(device, nir, &gs_stage->layout, pipeline_key, RADV_PIPELINE_GRAPHICS, false,
&gs_copy_stage.info);
gs_copy_stage.info.wave_size = 64; /* Wave32 not supported. */
gs_copy_stage.info.workgroup_size = 64; /* HW VS: separate waves, no workgroups */
static void
radv_graphics_shaders_nir_to_asm(struct radv_device *device, struct vk_pipeline_cache *cache,
struct radv_shader_stage *stages, const struct radv_pipeline_key *pipeline_key,
- const struct radv_pipeline_layout *pipeline_layout, bool keep_executable_info,
- bool keep_statistic_info, VkShaderStageFlagBits active_nir_stages,
- struct radv_shader **shaders, struct radv_shader_binary **binaries,
- struct radv_shader **gs_copy_shader, struct radv_shader_binary **gs_copy_binary)
+ bool keep_executable_info, bool keep_statistic_info,
+ VkShaderStageFlagBits active_nir_stages, struct radv_shader **shaders,
+ struct radv_shader_binary **binaries, struct radv_shader **gs_copy_shader,
+ struct radv_shader_binary **gs_copy_binary)
{
for (int s = MESA_VULKAN_SHADER_STAGES - 1; s >= 0; s--) {
if (!(active_nir_stages & (1 << s)))
&stages[s].info);
if (s == MESA_SHADER_GEOMETRY && !stages[s].info.is_ngg) {
- *gs_copy_shader =
- radv_create_gs_copy_shader(device, cache, &stages[MESA_SHADER_GEOMETRY], pipeline_key, pipeline_layout,
- keep_executable_info, keep_statistic_info, gs_copy_binary);
+ *gs_copy_shader = radv_create_gs_copy_shader(device, cache, &stages[MESA_SHADER_GEOMETRY], pipeline_key,
+ keep_executable_info, keep_statistic_info, gs_copy_binary);
}
stages[s].feedback.duration += os_time_get_nano() - stage_start;
continue;
radv_shader_stage_init(sinfo, &stages[s], s);
+ radv_shader_layout_init(&lib->layout, s, &stages[s].layout);
}
/* Import the NIR shaders (after SPIRV->NIR). */
stages[s].entrypoint = nir_shader_get_entrypoint(stages[s].nir)->function->name;
memcpy(stages[s].shader_sha1, retained_shaders->stages[s].shader_sha1, sizeof(stages[s].shader_sha1));
+ radv_shader_layout_init(&lib->layout, s, &stages[s].layout);
+
stages[s].feedback.flags |= VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT;
stages[s].feedback.duration += os_time_get_nano() - stage_start;
static void
radv_graphics_shaders_compile(struct radv_device *device, struct vk_pipeline_cache *cache,
struct radv_shader_stage *stages, const struct radv_pipeline_key *pipeline_key,
- struct radv_pipeline_layout *pipeline_layout, bool keep_executable_info,
- bool keep_statistic_info, bool is_internal,
+ bool keep_executable_info, bool keep_statistic_info, bool is_internal,
struct radv_retained_shaders *retained_shaders, bool noop_fs,
struct radv_shader **shaders, struct radv_shader_binary **binaries,
struct radv_shader **gs_copy_shader, struct radv_shader_binary **gs_copy_binary)
radv_nir_lower_poly_line_smooth(stages[MESA_SHADER_FRAGMENT].nir, pipeline_key);
}
- radv_fill_shader_info(device, RADV_PIPELINE_GRAPHICS, pipeline_layout, pipeline_key, stages, active_nir_stages);
+ radv_fill_shader_info(device, RADV_PIPELINE_GRAPHICS, pipeline_key, stages, active_nir_stages);
radv_declare_pipeline_args(device, stages, pipeline_key, active_nir_stages);
{
int64_t stage_start = os_time_get_nano();
- radv_postprocess_nir(device, pipeline_layout, pipeline_key, &stages[i]);
+ radv_postprocess_nir(device, pipeline_key, &stages[i]);
stages[i].feedback.duration += os_time_get_nano() - stage_start;
}
/* Compile NIR shaders to AMD assembly. */
- radv_graphics_shaders_nir_to_asm(device, cache, stages, pipeline_key, pipeline_layout, keep_executable_info,
- keep_statistic_info, active_nir_stages, shaders, binaries, gs_copy_shader,
- gs_copy_binary);
+ radv_graphics_shaders_nir_to_asm(device, cache, stages, pipeline_key, keep_executable_info, keep_statistic_info,
+ active_nir_stages, shaders, binaries, gs_copy_shader, gs_copy_binary);
if (keep_executable_info) {
for (int i = 0; i < MESA_VULKAN_SHADER_STAGES; ++i) {
continue;
radv_shader_stage_init(sinfo, &stages[stage], stage);
+ radv_shader_layout_init(pipeline_layout, stage, &stages[stage].layout);
}
radv_pipeline_load_retained_shaders(device, pipeline, pCreateInfo, stages);
const bool noop_fs = radv_pipeline_needs_noop_fs(pipeline, pipeline_key);
- radv_graphics_shaders_compile(device, cache, stages, pipeline_key, pipeline_layout, keep_executable_info,
- keep_statistic_info, pipeline->base.is_internal, retained_shaders, noop_fs,
- pipeline->base.shaders, binaries, &pipeline->base.gs_copy_shader, &gs_copy_binary);
+ radv_graphics_shaders_compile(device, cache, stages, pipeline_key, keep_executable_info, keep_statistic_info,
+ pipeline->base.is_internal, retained_shaders, noop_fs, pipeline->base.shaders,
+ binaries, &pipeline->base.gs_copy_shader, &gs_copy_binary);
if (!radv_pipeline_create_ps_epilog(device, pipeline, pipeline_key, lib_flags, &ps_epilog_binary))
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
radv_rt_fill_stage_info(struct radv_device *device, const VkRayTracingPipelineCreateInfoKHR *pCreateInfo,
struct radv_ray_tracing_stage *stages, struct radv_pipeline_key *key)
{
+ RADV_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, pCreateInfo->layout);
uint32_t idx;
for (idx = 0; idx < pCreateInfo->stageCount; idx++) {
stages[idx].stage = vk_to_mesa_shader_stage(pCreateInfo->pStages[idx].stage);
struct radv_shader_stage stage;
radv_shader_stage_init(&pCreateInfo->pStages[idx], &stage, stages[idx].stage);
+ radv_shader_layout_init(pipeline_layout, stages[idx].stage, &stage.layout);
radv_hash_shaders(stages[idx].sha1, &stage, 1, NULL, key, radv_get_hash_flags(device, false));
}
struct radv_shader **out_shader)
{
struct radv_shader_binary *binary;
- RADV_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, pCreateInfo->layout);
bool keep_executable_info = radv_pipeline_capture_shaders(device, pipeline->base.base.create_flags);
bool keep_statistic_info = radv_pipeline_capture_shader_stats(device, pipeline->base.base.create_flags);
/* Gather shader info. */
nir_shader_gather_info(stage->nir, nir_shader_get_entrypoint(stage->nir));
radv_nir_shader_info_init(stage->stage, MESA_SHADER_NONE, &stage->info);
- radv_nir_shader_info_pass(device, stage->nir, pipeline_layout, pipeline_key, RADV_PIPELINE_RAY_TRACING, false,
+ radv_nir_shader_info_pass(device, stage->nir, &stage->layout, pipeline_key, RADV_PIPELINE_RAY_TRACING, false,
&stage->info);
/* Declare shader arguments. */
temp_stage.nir = shaders[i];
radv_nir_lower_rt_abi(temp_stage.nir, pCreateInfo, &temp_stage.args, &stage->info, stack_size, i > 0);
radv_optimize_nir(temp_stage.nir, pipeline_key->optimisations_disabled);
- radv_postprocess_nir(device, pipeline_layout, pipeline_key, &temp_stage);
+ radv_postprocess_nir(device, pipeline_key, &temp_stage);
if (radv_can_dump_shader(device, temp_stage.nir, false))
nir_print_shader(temp_stage.nir, stderr);
const struct radv_pipeline_key *key, struct radv_ray_tracing_pipeline *pipeline,
struct radv_serialized_shader_arena_block *capture_replay_handles)
{
+ RADV_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, pCreateInfo->layout);
+
if (pipeline->base.base.create_flags & VK_PIPELINE_CREATE_2_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_KHR)
return VK_PIPELINE_COMPILE_REQUIRED;
VkResult result = VK_SUCCESS;
int64_t stage_start = os_time_get_nano();
struct radv_shader_stage stage;
radv_shader_stage_init(&pCreateInfo->pStages[idx], &stage, stages[idx].stage);
+ radv_shader_layout_init(pipeline_layout, stages[idx].stage, &stage.layout);
if (stages[idx].shader)
goto feedback;
/* precompile the shader */
- stage.nir = radv_parse_rt_stage(device, &pCreateInfo->pStages[idx], key);
+ stage.nir = radv_parse_rt_stage(device, &pCreateInfo->pStages[idx], key, pipeline_layout);
if (radv_ray_tracing_stage_is_compiled(&stages[idx])) {
uint32_t stack_size = 0;
.nir = traversal_module.nir,
};
vk_pipeline_hash_shader_stage(&pStage, NULL, traversal_stage.shader_sha1);
+ radv_shader_layout_init(pipeline_layout, MESA_SHADER_INTERSECTION, &traversal_stage.layout);
result = radv_rt_nir_to_asm(device, cache, pCreateInfo, key, pipeline, &traversal_stage, NULL, NULL,
&pipeline->base.base.shaders[MESA_SHADER_INTERSECTION]);
RADV_DECL_PIPELINE_DOWNCAST(compute, RADV_PIPELINE_COMPUTE)
RADV_DECL_PIPELINE_DOWNCAST(ray_tracing, RADV_PIPELINE_RAY_TRACING)
+struct radv_shader_layout {
+ uint32_t num_sets;
+
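+ /* Per-set descriptor layout and the base index of the set's dynamic
+  * offsets within the pipeline-wide dynamic offset array.
+  */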
+ struct {
+ struct radv_descriptor_set_layout *layout;
+ uint32_t dynamic_offset_start;
+ } set[MAX_SETS];
+
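+ /* Dynamic buffer descriptors are stored right after the push constants,
+  * at push_constant_size + 16 * dynamic offset index.
+  */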
+ uint32_t push_constant_size;
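+ /* Set when the stage accesses dynamic UBO/SSBO descriptors. */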
+ bool use_dynamic_descriptors;
+};
+
struct radv_shader_stage {
gl_shader_stage stage;
struct radv_shader_args args;
VkPipelineCreationFeedback feedback;
+
+ struct radv_shader_layout layout;
};
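+/* Build the per-stage radv_shader_layout from the full pipeline layout. */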
+void radv_shader_layout_init(const struct radv_pipeline_layout *pipeline_layout, gl_shader_stage stage,
+ struct radv_shader_layout *layout);
+
static inline bool
radv_is_last_vgt_stage(const struct radv_shader_stage *stage)
{
struct radv_shader_info;
void radv_nir_shader_info_pass(struct radv_device *device, const struct nir_shader *nir,
- const struct radv_pipeline_layout *layout, const struct radv_pipeline_key *pipeline_key,
+ const struct radv_shader_layout *layout, const struct radv_pipeline_key *pipeline_key,
const enum radv_pipeline_type pipeline_type, bool consider_force_vrs,
struct radv_shader_info *info);
nir_shader *
radv_parse_rt_stage(struct radv_device *device, const VkPipelineShaderStageCreateInfo *sinfo,
- const struct radv_pipeline_key *key)
+ const struct radv_pipeline_key *key, const struct radv_pipeline_layout *pipeline_layout)
{
struct radv_shader_stage rt_stage;
radv_shader_stage_init(sinfo, &rt_stage, vk_to_mesa_shader_stage(sinfo->stage));
+ radv_shader_layout_init(pipeline_layout, vk_to_mesa_shader_stage(sinfo->stage), &rt_stage.layout);
nir_shader *shader = radv_shader_spirv_to_nir(device, &rt_stage, key, false);
void radv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively);
void radv_optimize_nir_algebraic(nir_shader *shader, bool opt_offsets);
-void radv_postprocess_nir(struct radv_device *device, const struct radv_pipeline_layout *pipeline_layout,
- const struct radv_pipeline_key *pipeline_key, struct radv_shader_stage *stage);
+void radv_postprocess_nir(struct radv_device *device, const struct radv_pipeline_key *pipeline_key,
+ struct radv_shader_stage *stage);
nir_shader *radv_parse_rt_stage(struct radv_device *device, const VkPipelineShaderStageCreateInfo *sinfo,
- const struct radv_pipeline_key *key);
+ const struct radv_pipeline_key *key,
+ const struct radv_pipeline_layout *pipeline_layout);
void radv_nir_lower_rt_abi(nir_shader *shader, const VkRayTracingPipelineCreateInfoKHR *pCreateInfo,
const struct radv_shader_args *args, const struct radv_shader_info *info,
void
radv_nir_shader_info_pass(struct radv_device *device, const struct nir_shader *nir,
- const struct radv_pipeline_layout *layout, const struct radv_pipeline_key *pipeline_key,
+ const struct radv_shader_layout *layout, const struct radv_pipeline_key *pipeline_key,
const enum radv_pipeline_type pipeline_type, bool consider_force_vrs,
struct radv_shader_info *info)
{
struct nir_function *func = (struct nir_function *)exec_list_get_head_const(&nir->functions);
- if (layout->dynamic_offset_count && (layout->dynamic_shader_stages & mesa_to_vk_shader_stage(nir->info.stage))) {
+ if (layout->use_dynamic_descriptors) {
info->loads_push_constants = true;
info->loads_dynamic_offsets = true;
}