dzn: Don't delegate binding translation to dxil_spirv_nir_passes()
author    Boris Brezillon <boris.brezillon@collabora.com>
          Tue, 21 Jun 2022 09:53:05 +0000 (02:53 -0700)
committer Marge Bot <emma+marge@anholt.net>
          Tue, 28 Jun 2022 13:02:23 +0000 (13:02 +0000)
We will need to hash the var bindings if we want to cache DXIL shaders.
Let's move the binding-translation pass to dzn_pipeline.c to prepare for
this transition.

Reviewed-by: Jesse Natalie <jenatali@microsoft.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/17140>

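Note on the motivation above: once the binding translation lives in
dzn_pipeline_layout (the new binding_translation[] table in dzn_private.h
below), it can be folded into a DXIL shader-cache key. The following is a
minimal sketch of what that hashing could look like; it is not part of this
commit, and it assumes Mesa's util/mesa-sha1.h helpers (_mesa_sha1_init /
_mesa_sha1_update / _mesa_sha1_final) plus a hypothetical
dzn_pipeline_layout_hash_bindings() helper name.

#include "util/mesa-sha1.h"

/* Sketch only: fold the per-set binding -> base-shader-register translation
 * into a shader-cache key. Names are illustrative, not from this patch. */
static void
dzn_pipeline_layout_hash_bindings(const struct dzn_pipeline_layout *layout,
                                  uint8_t hash[SHA1_DIGEST_LENGTH])
{
   struct mesa_sha1 ctx;

   _mesa_sha1_init(&ctx);
   _mesa_sha1_update(&ctx, &layout->set_count, sizeof(layout->set_count));
   for (uint32_t s = 0; s < layout->set_count; s++) {
      uint32_t count = layout->binding_translation[s].binding_count;

      /* base_reg[] is the binding -> base register table that the new
       * adjust_var_bindings() pass consumes. */
      _mesa_sha1_update(&ctx, &count, sizeof(count));
      if (count) {
         _mesa_sha1_update(&ctx, layout->binding_translation[s].base_reg,
                           count * sizeof(uint32_t));
      }
   }
   _mesa_sha1_final(&ctx, hash);
}
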
src/microsoft/spirv_to_dxil/dxil_spirv_nir.c
src/microsoft/spirv_to_dxil/spirv_to_dxil.h
src/microsoft/vulkan/dzn_descriptor_set.c
src/microsoft/vulkan/dzn_pipeline.c
src/microsoft/vulkan/dzn_private.h

diff --git a/src/microsoft/spirv_to_dxil/dxil_spirv_nir.c b/src/microsoft/spirv_to_dxil/dxil_spirv_nir.c
index 7011d9d..dfc5f06 100644
@@ -370,55 +370,6 @@ dxil_spirv_nir_lower_yz_flip(nir_shader *shader,
 }
 
 static bool
-adjust_resource_index_binding(struct nir_builder *builder, nir_instr *instr,
-                              void *cb_data)
-{
-   struct dxil_spirv_runtime_conf *conf =
-      (struct dxil_spirv_runtime_conf *)cb_data;
-
-   if (instr->type != nir_instr_type_intrinsic)
-      return false;
-
-   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
-
-   if (intrin->intrinsic != nir_intrinsic_vulkan_resource_index)
-      return false;
-
-   unsigned set = nir_intrinsic_desc_set(intrin);
-   unsigned binding = nir_intrinsic_binding(intrin);
-
-   if (set >= conf->descriptor_set_count)
-      return false;
-
-   binding = conf->descriptor_sets[set].bindings[binding].base_register;
-   nir_intrinsic_set_binding(intrin, binding);
-
-   return true;
-}
-
-static bool
-dxil_spirv_nir_adjust_var_bindings(nir_shader *shader,
-                                   const struct dxil_spirv_runtime_conf *conf)
-{
-   uint32_t modes = nir_var_image | nir_var_uniform | nir_var_mem_ubo | nir_var_mem_ssbo;
-
-   nir_foreach_variable_with_modes(var, shader, modes) {
-      if (var->data.mode == nir_var_uniform) {
-         const struct glsl_type *type = glsl_without_array(var->type);
-
-         if (!glsl_type_is_sampler(type) && !glsl_type_is_texture(type))
-            continue;
-      }
-
-      unsigned s = var->data.descriptor_set, b = var->data.binding;
-      var->data.binding = conf->descriptor_sets[s].bindings[b].base_register;
-   }
-
-   return nir_shader_instructions_pass(shader, adjust_resource_index_binding,
-                                       nir_metadata_all, (void *)conf);
-}
-
-static bool
 discard_psiz_access(struct nir_builder *builder, nir_instr *instr,
                     void *cb_data)
 {
@@ -665,9 +616,6 @@ dxil_spirv_nir_passes(nir_shader *nir,
                  ARRAY_SIZE(system_values));
    }
 
-   if (conf->descriptor_set_count > 0)
-      NIR_PASS_V(nir, dxil_spirv_nir_adjust_var_bindings, conf);
-
    *requires_runtime_data = false;
    NIR_PASS(*requires_runtime_data, nir,
             dxil_spirv_nir_lower_shader_system_values,
diff --git a/src/microsoft/spirv_to_dxil/spirv_to_dxil.h b/src/microsoft/spirv_to_dxil/spirv_to_dxil.h
index f22863c..a1ee16e 100644
@@ -125,15 +125,6 @@ enum dxil_spirv_yz_flip_mode {
    DXIL_SPIRV_YZ_FLIP_CONDITIONAL = DXIL_SPIRV_Y_FLIP_CONDITIONAL | DXIL_SPIRV_Z_FLIP_CONDITIONAL,
 };
 
-struct dxil_spirv_vulkan_binding {
-   uint32_t base_register;
-};
-
-struct dxil_spirv_vulkan_descriptor_set {
-   uint32_t binding_count;
-   const struct dxil_spirv_vulkan_binding *bindings;
-};
-
 #define DXIL_SPIRV_MAX_VIEWPORT 16
 
 struct dxil_spirv_runtime_conf {
@@ -147,9 +138,6 @@ struct dxil_spirv_runtime_conf {
       uint32_t base_shader_register;
    } push_constant_cbv;
 
-   uint32_t descriptor_set_count;
-   const struct dxil_spirv_vulkan_descriptor_set *descriptor_sets;
-
    // Set true if vertex and instance ids have already been converted to
    // zero-based. Otherwise, runtime_data will be required to lower them.
    bool zero_based_vertex_instance_id;
diff --git a/src/microsoft/vulkan/dzn_descriptor_set.c b/src/microsoft/vulkan/dzn_descriptor_set.c
index d9121ab..9450046 100644
@@ -527,8 +527,7 @@ dzn_pipeline_layout_create(struct dzn_device *device,
 
    VK_MULTIALLOC(ma);
    VK_MULTIALLOC_DECL(&ma, struct dzn_pipeline_layout, layout, 1);
-   VK_MULTIALLOC_DECL(&ma, struct dxil_spirv_vulkan_binding,
-                      bindings, binding_count);
+   VK_MULTIALLOC_DECL(&ma, uint32_t, binding_translation, binding_count);
 
    if (!vk_multialloc_zalloc(&ma, &device->vk.alloc,
                              VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
@@ -542,8 +541,8 @@ dzn_pipeline_layout_create(struct dzn_device *device,
       if (!set_layout || !set_layout->binding_count)
          continue;
 
-      layout->binding_translation[s].bindings = bindings;
-      bindings += set_layout->binding_count;
+      layout->binding_translation[s].base_reg = binding_translation;
+      binding_translation += set_layout->binding_count;
    }
 
    uint32_t range_count = 0, static_sampler_count = 0;
@@ -557,15 +556,14 @@ dzn_pipeline_layout_create(struct dzn_device *device,
    layout->set_count = pCreateInfo->setLayoutCount;
    for (uint32_t j = 0; j < layout->set_count; j++) {
       VK_FROM_HANDLE(dzn_descriptor_set_layout, set_layout, pCreateInfo->pSetLayouts[j]);
-      struct dxil_spirv_vulkan_binding *bindings =
-         (struct dxil_spirv_vulkan_binding *)layout->binding_translation[j].bindings;
+      uint32_t *binding_trans = layout->binding_translation[j].base_reg;
 
       layout->sets[j].dynamic_buffer_count = set_layout->dynamic_buffers.count;
       memcpy(layout->sets[j].range_desc_count, set_layout->range_desc_count,
              sizeof(layout->sets[j].range_desc_count));
       layout->binding_translation[j].binding_count = set_layout->binding_count;
       for (uint32_t b = 0; b < set_layout->binding_count; b++)
-         bindings[b].base_register = set_layout->bindings[b].base_shader_register;
+         binding_trans[b] = set_layout->bindings[b].base_shader_register;
 
       static_sampler_count += set_layout->static_sampler_count;
       dzn_foreach_pool_type (type) {
diff --git a/src/microsoft/vulkan/dzn_pipeline.c b/src/microsoft/vulkan/dzn_pipeline.c
index b99fc45..a04842a 100644
@@ -152,8 +152,6 @@ dzn_pipeline_get_nir_shader(struct dzn_device *device,
          .register_space = DZN_REGISTER_SPACE_PUSH_CONSTANT,
          .base_shader_register = 0,
       },
-      .descriptor_set_count = layout->set_count,
-      .descriptor_sets = layout->binding_translation,
       .zero_based_vertex_instance_id = false,
       .yz_flip = {
          .mode = yz_flip_mode,
@@ -181,6 +179,59 @@ dzn_pipeline_get_nir_shader(struct dzn_device *device,
    return VK_SUCCESS;
 }
 
+static bool
+adjust_resource_index_binding(struct nir_builder *builder, nir_instr *instr,
+                              void *cb_data)
+{
+   if (instr->type != nir_instr_type_intrinsic)
+      return false;
+
+   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+
+   if (intrin->intrinsic != nir_intrinsic_vulkan_resource_index)
+      return false;
+
+   const struct dzn_pipeline_layout *layout = cb_data;
+   unsigned set = nir_intrinsic_desc_set(intrin);
+   unsigned binding = nir_intrinsic_binding(intrin);
+
+   if (set >= layout->set_count ||
+       binding >= layout->binding_translation[set].binding_count)
+      return false;
+
+   binding = layout->binding_translation[set].base_reg[binding];
+   nir_intrinsic_set_binding(intrin, binding);
+
+   return true;
+}
+
+static bool
+adjust_var_bindings(nir_shader *shader,
+                    const struct dzn_pipeline_layout *layout)
+{
+   uint32_t modes = nir_var_image | nir_var_uniform | nir_var_mem_ubo | nir_var_mem_ssbo;
+
+   nir_foreach_variable_with_modes(var, shader, modes) {
+      if (var->data.mode == nir_var_uniform) {
+         const struct glsl_type *type = glsl_without_array(var->type);
+
+         if (!glsl_type_is_sampler(type) && !glsl_type_is_texture(type))
+            continue;
+      }
+
+      unsigned s = var->data.descriptor_set, b = var->data.binding;
+
+      if (s >= layout->set_count)
+         continue;
+
+      assert(b < layout->binding_translation[s].binding_count);
+      var->data.binding = layout->binding_translation[s].base_reg[b];
+   }
+
+   return nir_shader_instructions_pass(shader, adjust_resource_index_binding,
+                                       nir_metadata_all, (void *)layout);
+}
+
 static VkResult
 dzn_pipeline_compile_shader(struct dzn_device *device,
                             nir_shader *nir,
@@ -378,6 +429,10 @@ dzn_graphics_pipeline_compile_shaders(struct dzn_device *device,
                           pipeline->templates.shaders[prev_stage].nir : NULL);
    }
 
+   u_foreach_bit(stage, active_stage_mask) {
+      NIR_PASS_V(pipeline->templates.shaders[stage].nir, adjust_var_bindings, layout);
+   }
+
    if (pipeline->templates.shaders[MESA_SHADER_VERTEX].nir) {
       /* Now, declare one D3D12_INPUT_ELEMENT_DESC per VS input variable, so
        * we can handle location overlaps properly.
@@ -1554,6 +1609,8 @@ dzn_compute_pipeline_create(struct dzn_device *device,
    if (ret != VK_SUCCESS)
       goto out;
 
+   NIR_PASS_V(nir, adjust_var_bindings, layout);
+
    ret = dzn_pipeline_compile_shader(device, nir, shader);
    if (ret != VK_SUCCESS)
       goto out;
diff --git a/src/microsoft/vulkan/dzn_private.h b/src/microsoft/vulkan/dzn_private.h
index f36e57a..6a2c1f9 100644
@@ -650,7 +650,10 @@ struct dzn_pipeline_layout {
       uint32_t dynamic_buffer_count;
       uint32_t range_desc_count[NUM_POOL_TYPES];
    } sets[MAX_SETS];
-   struct dxil_spirv_vulkan_descriptor_set binding_translation[MAX_SETS];
+   struct {
+      uint32_t binding_count;
+      uint32_t *base_reg;
+   } binding_translation[MAX_SETS];
    uint32_t set_count;
    uint32_t desc_count[NUM_POOL_TYPES];
    struct {