nir: Rename nir_mem_access_size_align::align_mul to align
author     Faith Ekstrand <faith.ekstrand@collabora.com>
           Wed, 1 Mar 2023 15:10:20 +0000 (09:10 -0600)
committer  Marge Bot <emma+marge@anholt.net>
           Fri, 3 Mar 2023 02:00:39 +0000 (02:00 +0000)
The field holds a simple power-of-two alignment rather than an align_mul/align_offset pair, so calling it align_mul is a bit misleading.

Suggested-by: M Henning <drawoc@darkrefraction.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/21524>

src/compiler/nir/nir.h
src/compiler/nir/nir_lower_mem_access_bit_sizes.c
src/intel/compiler/brw_nir.c
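
For downstream callbacks the update is mechanical: wherever a nir_mem_access_size_align is built, the .align_mul designator becomes .align. A minimal sketch of such a callback follows; the function name and the trimmed parameter list are illustrative only (the real callback, as in the brw_nir.c hunk further down, takes additional alignment and cb_data parameters), and MIN2/DIV_ROUND_UP are Mesa's util/macros.h helpers:

   /* Illustrative sketch, not part of this patch: request dword-aligned
    * 32-bit access, mirroring what the Intel backend does below.
    */
   static nir_mem_access_size_align
   example_mem_access_size_align(nir_intrinsic_op intrin, uint8_t bytes)
   {
      return (nir_mem_access_size_align) {
         .bit_size = 32,
         .num_components = DIV_ROUND_UP(MIN2(bytes, 16), 4),
         .align = 4,   /* previously .align_mul */
      };
   }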

diff --git a/src/compiler/nir/nir.h b/src/compiler/nir/nir.h
index cad51a2..7bf23df 100644
--- a/src/compiler/nir/nir.h
+++ b/src/compiler/nir/nir.h
@@ -4949,7 +4949,7 @@ bool nir_lower_explicit_io(nir_shader *shader,
 typedef struct {
    uint8_t num_components;
    uint8_t bit_size;
-   uint16_t align_mul;
+   uint16_t align;
 } nir_mem_access_size_align;
 
 typedef nir_mem_access_size_align
diff --git a/src/compiler/nir/nir_lower_mem_access_bit_sizes.c b/src/compiler/nir/nir_lower_mem_access_bit_sizes.c
index f20ac11..4d68761 100644
--- a/src/compiler/nir/nir_lower_mem_access_bit_sizes.c
+++ b/src/compiler/nir/nir_lower_mem_access_bit_sizes.c
@@ -94,10 +94,10 @@ lower_mem_load(nir_builder *b, nir_intrinsic_instr *intrin,
                                offset_is_const, cb_data);
 
    assert(util_is_power_of_two_nonzero(align_mul));
-   assert(util_is_power_of_two_nonzero(requested.align_mul));
+   assert(util_is_power_of_two_nonzero(requested.align));
    if (requested.num_components == num_components &&
        requested.bit_size == bit_size &&
-       requested.align_mul <= whole_align)
+       requested.align <= whole_align)
       return false;
 
    /* Otherwise, we have to break it into chunks.  We could end up with as
@@ -115,22 +115,22 @@ lower_mem_load(nir_builder *b, nir_intrinsic_instr *intrin,
                                            offset_is_const, cb_data);
 
       unsigned chunk_bytes;
-      assert(util_is_power_of_two_nonzero(requested.align_mul));
-      if (align_mul < requested.align_mul) {
+      assert(util_is_power_of_two_nonzero(requested.align));
+      if (align_mul < requested.align) {
          /* For this case, we need to be able to shift the value so we assume
           * there's at most one component.
           */
          assert(requested.num_components == 1);
-         assert(requested.bit_size >= requested.align_mul * 8);
+         assert(requested.bit_size >= requested.align * 8);
 
-         uint64_t align_mask = requested.align_mul - 1;
+         uint64_t align_mask = requested.align - 1;
          nir_ssa_def *chunk_offset = nir_iadd_imm(b, offset, chunk_start);
          nir_ssa_def *pad = nir_iand_imm(b, chunk_offset, align_mask);
          chunk_offset = nir_iand_imm(b, chunk_offset, ~align_mask);
 
          nir_intrinsic_instr *load =
             dup_mem_intrinsic(b, intrin, chunk_offset,
-                              requested.align_mul, 0, NULL,
+                              requested.align, 0, NULL,
                               requested.num_components, requested.bit_size);
 
          nir_ssa_def *shifted =
@@ -139,9 +139,9 @@ lower_mem_load(nir_builder *b, nir_intrinsic_instr *intrin,
          chunk_bytes = MIN2(bytes_left, align_mul);
          assert(num_chunks < ARRAY_SIZE(chunks));
          chunks[num_chunks++] = nir_u2uN(b, shifted, chunk_bytes * 8);
-      } else if (chunk_align_offset % requested.align_mul) {
+      } else if (chunk_align_offset % requested.align) {
          /* In this case, we know how much to adjust the offset */
-         uint32_t delta = chunk_align_offset % requested.align_mul;
+         uint32_t delta = chunk_align_offset % requested.align;
          nir_ssa_def *chunk_offset =
             nir_iadd_imm(b, offset, chunk_start - (int)delta);
 
@@ -224,10 +224,10 @@ lower_mem_store(nir_builder *b, nir_intrinsic_instr *intrin,
                                offset_is_const, cb_data);
 
    assert(util_is_power_of_two_nonzero(align_mul));
-   assert(util_is_power_of_two_nonzero(requested.align_mul));
+   assert(util_is_power_of_two_nonzero(requested.align));
    if (requested.num_components == num_components &&
        requested.bit_size == bit_size &&
-       requested.align_mul <= whole_align &&
+       requested.align <= whole_align &&
        writemask == BITFIELD_MASK(num_components))
       return false;
 
@@ -263,9 +263,9 @@ lower_mem_store(nir_builder *b, nir_intrinsic_instr *intrin,
          requested.num_components * (requested.bit_size / 8);
       assert(chunk_bytes <= max_chunk_bytes);
 
-      assert(util_is_power_of_two_nonzero(requested.align_mul));
-      assert(requested.align_mul <= align_mul);
-      assert((chunk_align_offset % requested.align_mul) == 0);
+      assert(util_is_power_of_two_nonzero(requested.align));
+      assert(requested.align <= align_mul);
+      assert((chunk_align_offset % requested.align) == 0);
 
       nir_ssa_def *packed = nir_extract_bits(b, &value, 1, chunk_start * 8,
                                              requested.num_components,
diff --git a/src/intel/compiler/brw_nir.c b/src/intel/compiler/brw_nir.c
index fcf1976..71f8e10 100644
--- a/src/intel/compiler/brw_nir.c
+++ b/src/intel/compiler/brw_nir.c
@@ -1283,7 +1283,7 @@ get_mem_access_size_align(nir_intrinsic_op intrin, uint8_t bytes,
          return (nir_mem_access_size_align) {
             .bit_size = 32,
             .num_components = comps32,
-            .align_mul = 4,
+            .align = 4,
          };
       }
       break;
@@ -1293,7 +1293,7 @@ get_mem_access_size_align(nir_intrinsic_op intrin, uint8_t bytes,
          return (nir_mem_access_size_align) {
             .bit_size = 32,
             .num_components = 1,
-            .align_mul = 4,
+            .align = 4,
          };
       }
       break;
@@ -1328,7 +1328,7 @@ get_mem_access_size_align(nir_intrinsic_op intrin, uint8_t bytes,
       return (nir_mem_access_size_align) {
          .bit_size = bytes * 8,
          .num_components = 1,
-         .align_mul = 1,
+         .align = 1,
       };
    } else {
       bytes = MIN2(bytes, 16);
@@ -1336,7 +1336,7 @@ get_mem_access_size_align(nir_intrinsic_op intrin, uint8_t bytes,
          .bit_size = 32,
          .num_components = is_scratch ? 1 :
                            is_load ? DIV_ROUND_UP(bytes, 4) : bytes / 4,
-         .align_mul = 4,
+         .align = 4,
       };
    }
 }