intel/nir: Lower memory access bit sizes later
author     Jason Ekstrand <jason@jlekstrand.net>
           Sat, 28 Mar 2020 04:33:27 +0000 (23:33 -0500)
committer  Marge Bot <eric+marge@anholt.net>
           Fri, 3 Apr 2020 20:26:54 +0000 (20:26 +0000)
We're about to add load/store vectorization right before this pass, but
vectorization needs to happen after a round of optimization.  Otherwise,
the vectorizer will be handed unoptimized NIR straight from ANV and
won't be able to do anything with it.  (The resulting pass ordering is
sketched below.)

Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Reviewed-by: Ian Romanick <ian.d.romanick@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4367>
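
For context, a minimal sketch of the resulting pass ordering inside
brw_postprocess_nir() (simplified and not verbatim Mesa source; the
vectorizer call site marked below is the upcoming addition this commit
prepares for, not part of this patch):

   OPT(nir_opt_combine_memory_barriers, combine_all_barriers, NULL);

   /* ...the existing lowering/optimization loop runs here... */

   brw_nir_optimize(nir, compiler, is_scalar, false);

   /* A follow-up commit adds load/store vectorization at this point,
    * so it sees optimized NIR rather than raw input from ANV. */

   if (OPT(brw_nir_lower_mem_access_bit_sizes, devinfo)) {
      /* Bit-size lowering emits pack/unpack chains and split
       * loads/stores; iterate local cleanup to a fixed point. */
      do {
         progress = false;
         OPT(nir_lower_pack);
         OPT(nir_copy_prop);
         OPT(nir_opt_dce);
         OPT(nir_opt_cse);
         OPT(nir_opt_algebraic);
         OPT(nir_opt_constant_folding);
      } while (progress);
   }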

src/intel/compiler/brw_nir.c

index c7e9c0e..8a6cc8f 100644
@@ -861,8 +861,6 @@ brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
 
    UNUSED bool progress; /* Written by OPT */
 
-   OPT(brw_nir_lower_mem_access_bit_sizes, devinfo);
-
    OPT(nir_opt_combine_memory_barriers, combine_all_barriers, NULL);
 
    do {
@@ -872,6 +870,18 @@ brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
 
    brw_nir_optimize(nir, compiler, is_scalar, false);
 
+   if (OPT(brw_nir_lower_mem_access_bit_sizes, devinfo)) {
+      do {
+         progress = false;
+         OPT(nir_lower_pack);
+         OPT(nir_copy_prop);
+         OPT(nir_opt_dce);
+         OPT(nir_opt_cse);
+         OPT(nir_opt_algebraic);
+         OPT(nir_opt_constant_folding);
+      } while (progress);
+   }
+
    if (OPT(nir_lower_int64, nir->options->lower_int64_options))
       brw_nir_optimize(nir, compiler, is_scalar, false);
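
A note on the OPT() macro used throughout these hunks: it is a local
convenience wrapper in brw_nir.c around NIR_PASS that records whether a
pass made progress.  A rough sketch of its shape (simplified; see
brw_nir.c for the exact definition):

   /* Approximate shape of OPT(): run a NIR pass on the shader, fold its
    * result into the enclosing "progress" flag, and evaluate to whether
    * this particular pass changed anything.  Sketch only, not the
    * verbatim macro. */
   #define OPT(pass, ...) ({                                 \
      bool this_progress = false;                            \
      NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__);     \
      if (this_progress)                                     \
         progress = true;                                    \
      this_progress;                                         \
   })

This is what lets the new hunk both gate the cleanup loop on the return
value of OPT(brw_nir_lower_mem_access_bit_sizes, devinfo) and use
"progress" as the loop condition.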