freedreno: use imm-helpers
author Erik Faye-Lund <erik.faye-lund@collabora.com>
Fri, 16 Jun 2023 12:35:12 +0000 (14:35 +0200)
committer Marge Bot <emma+marge@anholt.net>
Thu, 29 Jun 2023 07:08:19 +0000 (07:08 +0000)
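
Use the nir_*_imm() builder helpers (nir_iadd_imm, nir_ishl_imm,
nir_ushr_imm, nir_iand_imm, nir_ior_imm) instead of open-coding the
immediate operand with nir_imm_int()/nir_imm_intN_t(). The helpers take
the constant directly, emit an immediate of the matching bit-size, and
can elide the operation entirely for identity immediates such as adding
zero.

Subtraction of a constant becomes addition of the negated constant; a
representative before/after from ir3_nir.c:

   -  nir_isub(b, nir_load_subgroup_size(b), nir_imm_int(b, 1))
   +  nir_iadd_imm(b, nir_load_subgroup_size(b), -1)

No functional change intended.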
Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/23855>

src/freedreno/ir3/ir3_nir.c
src/freedreno/ir3/ir3_nir_analyze_ubo_ranges.c
src/freedreno/ir3/ir3_nir_lower_64b.c
src/freedreno/ir3/ir3_nir_lower_io_offsets.c
src/freedreno/ir3/ir3_nir_lower_tess.c
src/freedreno/vulkan/tu_shader.cc
src/gallium/drivers/freedreno/ir3/ir3_descriptor.c

diff --git a/src/freedreno/ir3/ir3_nir.c b/src/freedreno/ir3/ir3_nir.c
index f5c737c..60b31aa 100644
--- a/src/freedreno/ir3/ir3_nir.c
+++ b/src/freedreno/ir3/ir3_nir.c
@@ -222,7 +222,7 @@ ir3_nir_lower_ssbo_size_instr(nir_builder *b, nir_instr *instr, void *data)
 {
    uint8_t ssbo_size_to_bytes_shift = *(uint8_t *) data;
    nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
-   return nir_ishl(b, &intr->dest.ssa, nir_imm_int(b, ssbo_size_to_bytes_shift));
+   return nir_ishl_imm(b, &intr->dest.ssa, ssbo_size_to_bytes_shift);
 }
 
 static bool
@@ -427,7 +427,7 @@ lower_subgroup_id(nir_builder *b, nir_instr *instr, void *unused)
    if (intr->intrinsic == nir_intrinsic_load_subgroup_invocation) {
       return nir_iand(
          b, nir_load_local_invocation_index(b),
-         nir_isub(b, nir_load_subgroup_size(b), nir_imm_int(b, 1)));
+         nir_iadd_imm(b, nir_load_subgroup_size(b), -1));
    } else if (intr->intrinsic == nir_intrinsic_load_subgroup_id) {
       return nir_ishr(b, nir_load_local_invocation_index(b),
                       nir_load_subgroup_id_shift_ir3(b));
diff --git a/src/freedreno/ir3/ir3_nir_analyze_ubo_ranges.c b/src/freedreno/ir3/ir3_nir_analyze_ubo_ranges.c
index 24196c2..3c2c104 100644
--- a/src/freedreno/ir3/ir3_nir_analyze_ubo_ranges.c
+++ b/src/freedreno/ir3/ir3_nir_analyze_ubo_ranges.c
@@ -307,8 +307,8 @@ lower_ubo_load_to_uniform(nir_intrinsic_instr *instr, nir_builder *b,
       uniform_offset = new_offset;
    } else {
       uniform_offset = shift > 0
-                          ? nir_ishl(b, ubo_offset, nir_imm_int(b, shift))
-                          : nir_ushr(b, ubo_offset, nir_imm_int(b, -shift));
+                          ? nir_ishl_imm(b, ubo_offset, shift)
+                          : nir_ushr_imm(b, ubo_offset, -shift);
    }
 
    assert(!(const_offset & 0x3));
diff --git a/src/freedreno/ir3/ir3_nir_lower_64b.c b/src/freedreno/ir3/ir3_nir_lower_64b.c
index d260ff8..4770b10 100644
--- a/src/freedreno/ir3/ir3_nir_lower_64b.c
+++ b/src/freedreno/ir3/ir3_nir_lower_64b.c
@@ -98,7 +98,7 @@ lower_64b_intrinsics(nir_builder *b, nir_instr *instr, void *unused)
             nir_intrinsic_set_write_mask(store, 0x3);
          nir_builder_instr_insert(b, &store->instr);
 
-         off = nir_iadd(b, off, nir_imm_intN_t(b, 8, off->bit_size));
+         off = nir_iadd_imm(b, off, 8);
       }
 
       return NIR_LOWER_INSTR_PROGRESS_REPLACE;
@@ -114,9 +114,8 @@ lower_64b_intrinsics(nir_builder *b, nir_instr *instr, void *unused)
    if (intr->intrinsic == nir_intrinsic_load_kernel_input) {
       assert(num_comp == 1);
 
-      nir_ssa_def *offset = nir_iadd(b,
-            nir_ssa_for_src(b, intr->src[0], 1),
-            nir_imm_int(b, 4));
+      nir_ssa_def *offset = nir_iadd_imm(b,
+            nir_ssa_for_src(b, intr->src[0], 1), 4);
 
       nir_ssa_def *upper = nir_load_kernel_input(b, 1, 32, offset);
 
@@ -150,7 +149,7 @@ lower_64b_intrinsics(nir_builder *b, nir_instr *instr, void *unused)
 
          components[i] = nir_pack_64_2x32(b, &load->dest.ssa);
 
-         off = nir_iadd(b, off, nir_imm_intN_t(b, 8, off->bit_size));
+         off = nir_iadd_imm(b, off, 8);
       }
    } else {
       /* The remaining (non load/store) intrinsics just get zero-
diff --git a/src/freedreno/ir3/ir3_nir_lower_io_offsets.c b/src/freedreno/ir3/ir3_nir_lower_io_offsets.c
index 74b7ef8..9c017db 100644
--- a/src/freedreno/ir3/ir3_nir_lower_io_offsets.c
+++ b/src/freedreno/ir3/ir3_nir_lower_io_offsets.c
@@ -97,9 +97,9 @@ check_and_propagate_bit_shift32(nir_builder *b, nir_alu_instr *alu_instr,
 
    /* Add or subtract shift depending on the final direction (SHR vs. SHL). */
    if (shift * direction < 0)
-      shift_ssa = nir_isub(b, shift_ssa, nir_imm_int(b, abs(shift)));
+      shift_ssa = nir_iadd_imm(b, shift_ssa, -abs(shift));
    else
-      shift_ssa = nir_iadd(b, shift_ssa, nir_imm_int(b, abs(shift)));
+      shift_ssa = nir_iadd_imm(b, shift_ssa, abs(shift));
 
    return shift_ssa;
 }
@@ -161,8 +161,8 @@ scalarize_load(nir_intrinsic_instr *intrinsic, nir_builder *b)
    for (unsigned i = 0; i < intrinsic->dest.ssa.num_components; i++) {
       results[i] =
          nir_load_ssbo_ir3(b, 1, intrinsic->dest.ssa.bit_size, descriptor,
-                           nir_iadd(b, offset, nir_imm_int(b, i * comp_size)),
-                           nir_iadd(b, new_offset, nir_imm_int(b, i)),
+                           nir_iadd_imm(b, offset, i * comp_size),
+                           nir_iadd_imm(b, new_offset, i),
                            .access = nir_intrinsic_access(intrinsic),
                            .align_mul = nir_intrinsic_align_mul(intrinsic),
                            .align_offset = nir_intrinsic_align_offset(intrinsic));
@@ -241,7 +241,7 @@ lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b,
    if (new_offset)
       offset = new_offset;
    else
-      offset = nir_ushr(b, offset, nir_imm_int(b, shift));
+      offset = nir_ushr_imm(b, offset, shift);
 
    /* Insert the new intrinsic right before the old one. */
    nir_builder_instr_insert(b, &new_intrinsic->instr);
diff --git a/src/freedreno/ir3/ir3_nir_lower_tess.c b/src/freedreno/ir3/ir3_nir_lower_tess.c
index f7a87ce..20f2fd7 100644
--- a/src/freedreno/ir3/ir3_nir_lower_tess.c
+++ b/src/freedreno/ir3/ir3_nir_lower_tess.c
@@ -51,8 +51,7 @@ struct state {
 static nir_ssa_def *
 bitfield_extract(nir_builder *b, nir_ssa_def *v, uint32_t start, uint32_t mask)
 {
-   return nir_iand(b, nir_ushr(b, v, nir_imm_int(b, start)),
-                   nir_imm_int(b, mask));
+   return nir_iand_imm(b, nir_ushr_imm(b, v, start), mask);
 }
 
 static nir_ssa_def *
@@ -146,8 +145,8 @@ build_local_offset(nir_builder *b, struct state *state, nir_ssa_def *vertex,
    case MESA_SHADER_TESS_CTRL:
    case MESA_SHADER_GEOMETRY:
       vertex_stride = nir_load_vs_vertex_stride_ir3(b);
-      attr_offset = nir_iadd(b, nir_load_primitive_location_ir3(b, index),
-                             nir_imm_int(b, comp * 4));
+      attr_offset = nir_iadd_imm(b, nir_load_primitive_location_ir3(b, index),
+                                 comp * 4);
       break;
    default:
       unreachable("bad shader stage");
@@ -157,7 +156,7 @@ build_local_offset(nir_builder *b, struct state *state, nir_ssa_def *vertex,
 
    return nir_iadd(
       b, nir_iadd(b, primitive_offset, vertex_offset),
-      nir_iadd(b, attr_offset, nir_ishl(b, offset, nir_imm_int(b, 4))));
+      nir_iadd(b, attr_offset, nir_ishl_imm(b, offset, 4)));
 }
 
 static nir_intrinsic_instr *
@@ -417,7 +416,7 @@ build_per_vertex_offset(nir_builder *b, struct state *state,
       /* Offset is in vec4's, but we need it in unit of components for the
        * load/store_global_ir3 offset.
        */
-      offset = nir_ishl(b, offset, nir_imm_int(b, 2));
+      offset = nir_ishl_imm(b, offset, 2);
    }
 
    nir_ssa_def *vertex_offset;
@@ -428,8 +427,8 @@ build_per_vertex_offset(nir_builder *b, struct state *state,
          attr_offset = nir_imm_int(b, state->map.loc[index] + comp);
          break;
       case MESA_SHADER_TESS_EVAL:
-         attr_offset = nir_iadd(b, nir_load_primitive_location_ir3(b, index),
-                                nir_imm_int(b, comp));
+         attr_offset = nir_iadd_imm(b, nir_load_primitive_location_ir3(b, index),
+                                    comp);
          break;
       default:
          unreachable("bad shader state");
@@ -437,12 +436,12 @@ build_per_vertex_offset(nir_builder *b, struct state *state,
 
       attr_offset = nir_iadd(b, attr_offset,
                              nir_imul24(b, offset, build_tcs_out_vertices(b)));
-      vertex_offset = nir_ishl(b, vertex, nir_imm_int(b, 2));
+      vertex_offset = nir_ishl_imm(b, vertex, 2);
    } else {
       assert(location >= VARYING_SLOT_PATCH0 &&
              location <= VARYING_SLOT_TESS_MAX);
       unsigned index = location - VARYING_SLOT_PATCH0;
-      attr_offset = nir_iadd(b, nir_imm_int(b, index * 4 + comp), offset);
+      attr_offset = nir_iadd_imm(b, offset, index * 4 + comp);
       vertex_offset = nir_imm_int(b, 0);
    }
 
@@ -506,7 +505,7 @@ build_tessfactor_base(nir_builder *b, gl_varying_slot slot, uint32_t comp,
       unreachable("bad");
    }
 
-   return nir_iadd(b, patch_offset, nir_imm_int(b, offset + comp));
+   return nir_iadd_imm(b, patch_offset, offset + comp);
 }
 
 static void
@@ -886,8 +885,8 @@ lower_gs_block(nir_block *block, nir_builder *b, struct state *state)
          unsigned stream = nir_intrinsic_stream_id(intr);
          /* vertex_flags_out |= stream */
          nir_store_var(b, state->vertex_flags_out,
-                       nir_ior(b, nir_load_var(b, state->vertex_flags_out),
-                               nir_imm_int(b, stream)),
+                       nir_ior_imm(b, nir_load_var(b, state->vertex_flags_out),
+                                   stream),
                        0x1 /* .x */);
 
          copy_vars(b, &state->emit_outputs, &state->old_outputs);
@@ -895,15 +894,17 @@ lower_gs_block(nir_block *block, nir_builder *b, struct state *state)
          nir_instr_remove(&intr->instr);
 
          nir_store_var(b, state->emitted_vertex_var,
-                       nir_iadd(b, nir_load_var(b, state->emitted_vertex_var),
-                                nir_imm_int(b, 1)),
+                       nir_iadd_imm(b,
+                                    nir_load_var(b,
+                                                 state->emitted_vertex_var),
+                                    1),
                        0x1);
 
          nir_pop_if(b, NULL);
 
          /* Increment the vertex count by 1 */
          nir_store_var(b, state->vertex_count_var,
-                       nir_iadd(b, count, nir_imm_int(b, 1)), 0x1); /* .x */
+                       nir_iadd_imm(b, count, 1), 0x1); /* .x */
          nir_store_var(b, state->vertex_flags_out, nir_imm_int(b, 0), 0x1);
 
          break;
diff --git a/src/freedreno/vulkan/tu_shader.cc b/src/freedreno/vulkan/tu_shader.cc
index 2a7b44c..6620c6c 100644
--- a/src/freedreno/vulkan/tu_shader.cc
+++ b/src/freedreno/vulkan/tu_shader.cc
@@ -156,7 +156,7 @@ lower_load_push_constant(struct tu_device *dev,
    nir_ssa_def *load =
       nir_load_uniform(b, instr->num_components,
             instr->dest.ssa.bit_size,
-            nir_ushr(b, instr->src[0].ssa, nir_imm_int(b, 2)),
+            nir_ushr_imm(b, instr->src[0].ssa, 2),
             .base = base);
 
    nir_ssa_def_rewrite_uses(&instr->dest.ssa, load);
@@ -283,7 +283,7 @@ lower_ssbo_ubo_intrinsic(struct tu_device *dev,
        intrin->intrinsic == nir_intrinsic_load_ssbo &&
        (nir_intrinsic_access(intrin) & ACCESS_CAN_REORDER) &&
        intrin->dest.ssa.bit_size > 16) {
-      descriptor_idx = nir_iadd(b, descriptor_idx, nir_imm_int(b, 1));
+      descriptor_idx = nir_iadd_imm(b, descriptor_idx, 1);
    }
 
    nir_ssa_def *results[MAX_SETS + 1] = { NULL };
@@ -374,8 +374,7 @@ build_bindless(struct tu_device *dev, nir_builder *b,
          return nir_imm_int(b, idx);
 
       nir_ssa_def *arr_index = nir_ssa_for_src(b, deref->arr.index, 1);
-      return nir_iadd(b, nir_imm_int(b, idx),
-                      nir_imul_imm(b, arr_index, 2));
+      return nir_iadd_imm(b, nir_imul_imm(b, arr_index, 2), idx);
    }
 
    shader->active_desc_sets |= 1u << set;
diff --git a/src/gallium/drivers/freedreno/ir3/ir3_descriptor.c b/src/gallium/drivers/freedreno/ir3/ir3_descriptor.c
index 8b58605..2c0c4fe 100644
--- a/src/gallium/drivers/freedreno/ir3/ir3_descriptor.c
+++ b/src/gallium/drivers/freedreno/ir3/ir3_descriptor.c
@@ -62,7 +62,7 @@ lower_intrinsic(nir_builder *b, nir_intrinsic_instr *intr)
 
    unsigned set = ir3_shader_descriptor_set(b->shader->info.stage);
    nir_ssa_def *src = nir_ssa_for_src(b, intr->src[buffer_src], 1);
-   src = nir_iadd(b, src, nir_imm_int(b, desc_offset));
+   src = nir_iadd_imm(b, src, desc_offset);
    /* An out-of-bounds index into an SSBO/image array can cause a GPU fault
     * on access to the descriptor (I don't see any hw mechanism to bound the
     * access).  We could just allow the resulting iova fault (it is a read