anv/pipeline: Constant fold after apply_pipeline_layout
author	Jason Ekstrand <jason.ekstrand@intel.com>
Thu, 10 Jan 2019 07:47:14 +0000 (01:47 -0600)
committer	Jason Ekstrand <jason@jlekstrand.net>
Thu, 10 Jan 2019 20:34:00 +0000 (20:34 +0000)
Thanks to the new NIR load_descriptor intrinsic added by the UBO/SSBO
lowering series, we weren't getting UBO pushing because the UBO range
detection pass couldn't see the constants it needed.  This fixes that
problem with a quick round of constant folding.  Because we're folding,
we no longer need to go out of our way to generate constants when we
lower the vulkan_resource_index intrinsic, and we can make that lowering
a bit simpler.

Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
src/intel/vulkan/anv_nir_apply_pipeline_layout.c
src/intel/vulkan/anv_pipeline.c
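
For readers skimming the diff below, here is a rough sketch (not part of the
patch) of the NIR-builder shape that the simplified lower_res_index_intrinsic()
now emits, and why the extra constant-folding pass matters.  The helper name
build_block_index and its standalone framing are hypothetical, and it assumes
the usual Mesa nir_builder.h environment; surface_index, array_size and
add_bounds_checks stand in for values the real pass already computes, and the
UBO range detection referred to above is brw_nir_analyze_ubo_ranges in the
Intel compiler.

static nir_ssa_def *
build_block_index(nir_builder *b, nir_intrinsic_instr *intrin,
                  uint32_t surface_index, uint32_t array_size,
                  bool add_bounds_checks)
{
   /* Always take the generic path, even when src[0] is a constant. */
   nir_ssa_def *array_index = nir_ssa_for_src(b, intrin->src[0], 1);

   /* Clamp out-of-bounds indices.  For a constant index the umin folds
    * away, so the old MIN2-on-the-constant special case is unneeded.
    */
   if (nir_src_is_const(intrin->src[0]) || add_bounds_checks)
      array_index = nir_umin(b, array_index, nir_imm_int(b, array_size - 1));

   /* With a constant array_index, this iadd_imm folds to a single
    * load_const, which is what UBO range detection needs to see for UBO
    * pushing; hence the nir_opt_constant_folding call added below in
    * anv_pipeline.c.
    */
   return nir_iadd_imm(b, array_index, surface_index);
}

With that folding pass run immediately after apply_pipeline_layout, a constant
descriptor index collapses back into an immediate block index before the
backend looks for pushable UBO ranges.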

src/intel/vulkan/anv_nir_apply_pipeline_layout.c
index a0fd226..b3daf70 100644 (file)
@@ -144,19 +144,11 @@ lower_res_index_intrinsic(nir_intrinsic_instr *intrin,
    uint32_t array_size =
       state->layout->set[set].layout->binding[binding].array_size;
 
-   nir_ssa_def *block_index;
-   if (nir_src_is_const(intrin->src[0])) {
-      unsigned array_index = nir_src_as_uint(intrin->src[0]);
-      array_index = MIN2(array_index, array_size - 1);
-      block_index = nir_imm_int(b, surface_index + array_index);
-   } else {
-      block_index = nir_ssa_for_src(b, intrin->src[0], 1);
-
-      if (state->add_bounds_checks)
-         block_index = nir_umin(b, block_index, nir_imm_int(b, array_size - 1));
+   nir_ssa_def *array_index = nir_ssa_for_src(b, intrin->src[0], 1);
+   if (nir_src_is_const(intrin->src[0]) || state->add_bounds_checks)
+      array_index = nir_umin(b, array_index, nir_imm_int(b, array_size - 1));
 
-      block_index = nir_iadd(b, nir_imm_int(b, surface_index), block_index);
-   }
+   nir_ssa_def *block_index = nir_iadd_imm(b, array_index, surface_index);
 
    assert(intrin->dest.is_ssa);
    nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(block_index));
src/intel/vulkan/anv_pipeline.c
index d1efaaf..b99981d 100644 (file)
@@ -544,6 +544,7 @@ anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
                                     pipeline->device->robust_buffer_access,
                                     layout, nir, prog_data,
                                     &stage->bind_map);
+      NIR_PASS_V(nir, nir_opt_constant_folding);
    }
 
    if (nir->info.stage != MESA_SHADER_COMPUTE)