nir/nir_lower_uniforms_to_ubo: Set better alignments on our new instructions.
author Eric Anholt <eric@anholt.net>
Wed, 9 Sep 2020 17:21:49 +0000 (10:21 -0700)
committer Marge Bot <eric+marge@anholt.net>
Wed, 30 Sep 2020 19:53:43 +0000 (19:53 +0000)
The change on freedreno is in the noise.

Reviewed-by: Rob Clark <robdclark@chromium.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/6612>

src/compiler/nir/nir_lower_uniforms_to_ubo.c

index 4d1a5a7..47e92fa 100644
@@ -69,7 +69,24 @@ lower_instr(nir_intrinsic_instr *instr, nir_builder *b, int multiplier)
       load->src[0] = nir_src_for_ssa(ubo_idx);
       load->src[1] = nir_src_for_ssa(ubo_offset);
       assert(instr->dest.ssa.bit_size >= 8);
-      nir_intrinsic_set_align(load, instr->dest.ssa.bit_size / 8, 0);
+
+      /* If it's const, set the alignment to our known constant offset.  If
+       * not, set it to a pessimistic value based on the multiplier (or the
+       * scalar size, for qword loads).
+       *
+       * We could potentially set up stricter alignments for indirects by
+       * knowing what features are enabled in the APIs (see comment in
+       * nir_lower_ubo_vec4.c)
+       */
+      if (nir_src_is_const(instr->src[0])) {
+         nir_intrinsic_set_align(load, NIR_ALIGN_MUL_MAX,
+                                 (nir_src_as_uint(instr->src[0]) +
+                                  nir_intrinsic_base(instr) * multiplier) %
+                                 NIR_ALIGN_MUL_MAX);
+      } else {
+         nir_intrinsic_set_align(load, MAX2(multiplier,
+                                            instr->dest.ssa.bit_size / 8), 0);
+      }
       nir_ssa_dest_init(&load->instr, &load->dest,
                         load->num_components, instr->dest.ssa.bit_size,
                         instr->dest.ssa.name);
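
For readers not familiar with NIR's alignment metadata: nir_intrinsic_set_align(load, align_mul, align_offset) records that the load's byte offset is congruent to align_offset modulo align_mul, so a very large align_mul paired with a known align_offset effectively tells the backend the exact low bits of the address. The standalone C sketch below (not Mesa code; ALIGN_MUL_MAX and set_align_for_load are hypothetical stand-ins for NIR_ALIGN_MUL_MAX and the logic added in the hunk above) mirrors how the two branches fill in that pair:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_MUL_MAX 0x40000000u             /* hypothetical stand-in for NIR_ALIGN_MUL_MAX */
#define MAX2(a, b)    ((a) > (b) ? (a) : (b)) /* same role as Mesa's MAX2 */

/* Mirrors the two branches in the hunk above: with a constant uniform offset
 * the exact offset is known, so report it modulo the maximum multiplier;
 * with an indirect offset, only the multiplier (or scalar size) granularity
 * is guaranteed. */
static void
set_align_for_load(int offset_is_const, uint32_t const_offset,
                   uint32_t base, uint32_t multiplier, uint32_t bit_size,
                   uint32_t *align_mul, uint32_t *align_offset)
{
   if (offset_is_const) {
      *align_mul = ALIGN_MUL_MAX;
      *align_offset = (const_offset + base * multiplier) % ALIGN_MUL_MAX;
   } else {
      *align_mul = MAX2(multiplier, bit_size / 8);
      *align_offset = 0;
   }
}

int
main(void)
{
   uint32_t mul, off;

   /* Constant case: source offset 20, base 2, vec4 packing (multiplier 16). */
   set_align_for_load(1, 20, 2, 16, 32, &mul, &off);
   printf("const:    align_mul=0x%08x align_offset=%u\n", mul, off);

   /* Indirect case: 32-bit load, vec4 packing, so only 16-byte alignment. */
   set_align_for_load(0, 0, 0, 16, 32, &mul, &off);
   printf("indirect: align_mul=0x%08x align_offset=%u\n", mul, off);

   return 0;
}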