We will need to hash var bindings if we want to cache DXIL shaders.
Let's move the adjust_var_bindings() pass to dzn_pipeline.c to prepare
for this transition.
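A follow-up can then fold the per-set translation tables into the shader
cache key, roughly along these lines (hypothetical sketch, there is no
hash_binding_translation() helper yet; _mesa_sha1_update() comes from
util/mesa-sha1.h):

    static void
    hash_binding_translation(struct mesa_sha1 *ctx,
                             const struct dzn_pipeline_layout *layout)
    {
       _mesa_sha1_update(ctx, &layout->set_count, sizeof(layout->set_count));
       for (uint32_t s = 0; s < layout->set_count; s++) {
          uint32_t count = layout->binding_translation[s].binding_count;

          _mesa_sha1_update(ctx, &count, sizeof(count));
          _mesa_sha1_update(ctx, layout->binding_translation[s].base_reg,
                            count * sizeof(uint32_t));
       }
    }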
Reviewed-by: Jesse Natalie <jenatali@microsoft.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/17140>
 }
 
 static bool
-adjust_resource_index_binding(struct nir_builder *builder, nir_instr *instr,
-                              void *cb_data)
-{
-   struct dxil_spirv_runtime_conf *conf =
-      (struct dxil_spirv_runtime_conf *)cb_data;
-
-   if (instr->type != nir_instr_type_intrinsic)
-      return false;
-
-   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
-
-   if (intrin->intrinsic != nir_intrinsic_vulkan_resource_index)
-      return false;
-
-   unsigned set = nir_intrinsic_desc_set(intrin);
-   unsigned binding = nir_intrinsic_binding(intrin);
-
-   if (set >= conf->descriptor_set_count)
-      return false;
-
-   binding = conf->descriptor_sets[set].bindings[binding].base_register;
-   nir_intrinsic_set_binding(intrin, binding);
-
-   return true;
-}
-
-static bool
-dxil_spirv_nir_adjust_var_bindings(nir_shader *shader,
-                                   const struct dxil_spirv_runtime_conf *conf)
-{
-   uint32_t modes = nir_var_image | nir_var_uniform | nir_var_mem_ubo | nir_var_mem_ssbo;
-
-   nir_foreach_variable_with_modes(var, shader, modes) {
-      if (var->data.mode == nir_var_uniform) {
-         const struct glsl_type *type = glsl_without_array(var->type);
-
-         if (!glsl_type_is_sampler(type) && !glsl_type_is_texture(type))
-            continue;
-      }
-
-      unsigned s = var->data.descriptor_set, b = var->data.binding;
-      var->data.binding = conf->descriptor_sets[s].bindings[b].base_register;
-   }
-
-   return nir_shader_instructions_pass(shader, adjust_resource_index_binding,
-                                       nir_metadata_all, (void *)conf);
-}
-
-static bool
 discard_psiz_access(struct nir_builder *builder, nir_instr *instr,
                     void *cb_data)
 {
                  ARRAY_SIZE(system_values));
    }
 
-   if (conf->descriptor_set_count > 0)
-      NIR_PASS_V(nir, dxil_spirv_nir_adjust_var_bindings, conf);
-
    *requires_runtime_data = false;
    NIR_PASS(*requires_runtime_data, nir,
             dxil_spirv_nir_lower_shader_system_values,
    DXIL_SPIRV_YZ_FLIP_CONDITIONAL = DXIL_SPIRV_Y_FLIP_CONDITIONAL | DXIL_SPIRV_Z_FLIP_CONDITIONAL,
 };
 
-struct dxil_spirv_vulkan_binding {
-   uint32_t base_register;
-};
-
-struct dxil_spirv_vulkan_descriptor_set {
-   uint32_t binding_count;
-   const struct dxil_spirv_vulkan_binding *bindings;
-};
-
 #define DXIL_SPIRV_MAX_VIEWPORT 16
 
 struct dxil_spirv_runtime_conf {
       uint32_t base_shader_register;
    } push_constant_cbv;
 
-   uint32_t descriptor_set_count;
-   const struct dxil_spirv_vulkan_descriptor_set *descriptor_sets;
-
    // Set true if vertex and instance ids have already been converted to
    // zero-based. Otherwise, runtime_data will be required to lower them.
    bool zero_based_vertex_instance_id;
    VK_MULTIALLOC(ma);
    VK_MULTIALLOC_DECL(&ma, struct dzn_pipeline_layout, layout, 1);
-   VK_MULTIALLOC_DECL(&ma, struct dxil_spirv_vulkan_binding,
-                      bindings, binding_count);
+   VK_MULTIALLOC_DECL(&ma, uint32_t, binding_translation, binding_count);
 
    if (!vk_multialloc_zalloc(&ma, &device->vk.alloc,
                              VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
       if (!set_layout || !set_layout->binding_count)
          continue;
 
-      layout->binding_translation[s].bindings = bindings;
-      bindings += set_layout->binding_count;
+      layout->binding_translation[s].base_reg = binding_translation;
+      binding_translation += set_layout->binding_count;
    }
    uint32_t range_count = 0, static_sampler_count = 0;
    layout->set_count = pCreateInfo->setLayoutCount;
    for (uint32_t j = 0; j < layout->set_count; j++) {
       VK_FROM_HANDLE(dzn_descriptor_set_layout, set_layout, pCreateInfo->pSetLayouts[j]);
-      struct dxil_spirv_vulkan_binding *bindings =
-         (struct dxil_spirv_vulkan_binding *)layout->binding_translation[j].bindings;
+      uint32_t *binding_trans = layout->binding_translation[j].base_reg;
       layout->sets[j].dynamic_buffer_count = set_layout->dynamic_buffers.count;
       memcpy(layout->sets[j].range_desc_count, set_layout->range_desc_count,
              sizeof(layout->sets[j].range_desc_count));
       layout->binding_translation[j].binding_count = set_layout->binding_count;
       for (uint32_t b = 0; b < set_layout->binding_count; b++)
-         bindings[b].base_register = set_layout->bindings[b].base_shader_register;
+         binding_trans[b] = set_layout->bindings[b].base_shader_register;
       static_sampler_count += set_layout->static_sampler_count;
       dzn_foreach_pool_type (type) {
          .register_space = DZN_REGISTER_SPACE_PUSH_CONSTANT,
          .base_shader_register = 0,
       },
-      .descriptor_set_count = layout->set_count,
-      .descriptor_sets = layout->binding_translation,
       .zero_based_vertex_instance_id = false,
       .yz_flip = {
          .mode = yz_flip_mode,
    return VK_SUCCESS;
 }
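+/* Rewrite the binding index of vulkan_resource_index intrinsics so it points
+ * at the D3D12 base shader register recorded in the pipeline layout's
+ * binding translation table.
+ */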
+static bool
+adjust_resource_index_binding(struct nir_builder *builder, nir_instr *instr,
+                              void *cb_data)
+{
+   if (instr->type != nir_instr_type_intrinsic)
+      return false;
+
+   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+
+   if (intrin->intrinsic != nir_intrinsic_vulkan_resource_index)
+      return false;
+
+   const struct dzn_pipeline_layout *layout = cb_data;
+   unsigned set = nir_intrinsic_desc_set(intrin);
+   unsigned binding = nir_intrinsic_binding(intrin);
+
+   if (set >= layout->set_count ||
+       binding >= layout->binding_translation[set].binding_count)
+      return false;
+
+   binding = layout->binding_translation[set].base_reg[binding];
+   nir_intrinsic_set_binding(intrin, binding);
+
+   return true;
+}
+
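+/* Remap the bindings of image/sampler/UBO/SSBO variables to base shader
+ * registers using the pipeline layout translation table, then fix up the
+ * vulkan_resource_index intrinsics to match.
+ */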
+static bool
+adjust_var_bindings(nir_shader *shader,
+                    const struct dzn_pipeline_layout *layout)
+{
+   uint32_t modes = nir_var_image | nir_var_uniform | nir_var_mem_ubo | nir_var_mem_ssbo;
+
+   nir_foreach_variable_with_modes(var, shader, modes) {
+      if (var->data.mode == nir_var_uniform) {
+         const struct glsl_type *type = glsl_without_array(var->type);
+
+         if (!glsl_type_is_sampler(type) && !glsl_type_is_texture(type))
+            continue;
+      }
+
+      unsigned s = var->data.descriptor_set, b = var->data.binding;
+
+      if (s >= layout->set_count)
+         continue;
+
+      assert(b < layout->binding_translation[s].binding_count);
+      var->data.binding = layout->binding_translation[s].base_reg[b];
+   }
+
+   return nir_shader_instructions_pass(shader, adjust_resource_index_binding,
+                                       nir_metadata_all, (void *)layout);
+}
+
 static VkResult
 dzn_pipeline_compile_shader(struct dzn_device *device,
                             nir_shader *nir,
                                         pipeline->templates.shaders[prev_stage].nir : NULL);
    }
 
+   u_foreach_bit(stage, active_stage_mask) {
+      NIR_PASS_V(pipeline->templates.shaders[stage].nir, adjust_var_bindings, layout);
+   }
+
    if (pipeline->templates.shaders[MESA_SHADER_VERTEX].nir) {
       /* Now, declare one D3D12_INPUT_ELEMENT_DESC per VS input variable, so
        * we can handle location overlaps properly.
    if (ret != VK_SUCCESS)
       goto out;
 
+   NIR_PASS_V(nir, adjust_var_bindings, layout);
+
    ret = dzn_pipeline_compile_shader(device, nir, shader);
    if (ret != VK_SUCCESS)
       goto out;
       uint32_t dynamic_buffer_count;
       uint32_t range_desc_count[NUM_POOL_TYPES];
    } sets[MAX_SETS];
-   struct dxil_spirv_vulkan_descriptor_set binding_translation[MAX_SETS];
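+   /* Per-set translation table from Vulkan binding indices to D3D12 base
+    * shader registers, consumed by the adjust_var_bindings() NIR pass.
+    */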
+   struct {
+      uint32_t binding_count;
+      uint32_t *base_reg;
+   } binding_translation[MAX_SETS];
    uint32_t set_count;
    uint32_t desc_count[NUM_POOL_TYPES];
    struct {