nir: Get rid of nir_dest_bit_size()
authorFaith Ekstrand <faith.ekstrand@collabora.com>
Mon, 14 Aug 2023 16:08:07 +0000 (11:08 -0500)
committerMarge Bot <emma+marge@anholt.net>
Mon, 14 Aug 2023 21:22:53 +0000 (21:22 +0000)
We could add a nir_def_bit_size() helper but we use ->bit_size about 3x
as often as nir_dest_bit_size() today so that's a major Coccinelle
refactor anyway and this doesn't make it much worse.  Most of this
commit was generated by the following semantic patch:

    @@
    expression D;
    @@

    <...
    -nir_dest_bit_size(D)
    +D.ssa.bit_size
    ...>

Some manual fixup was needed, especially in cpp files where Coccinelle
tends to give up the moment it sees any interesting C++.

Acked-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/24674>

78 files changed:
src/amd/llvm/ac_nir_to_llvm.c
src/asahi/compiler/agx_compile.c
src/asahi/compiler/agx_nir_lower_address.c
src/asahi/compiler/agx_nir_lower_interpolation.c
src/asahi/compiler/agx_nir_lower_load_mask.c
src/asahi/compiler/agx_nir_lower_texture.c
src/asahi/compiler/agx_nir_lower_ubo.c
src/asahi/lib/agx_nir_lower_msaa.c
src/asahi/lib/agx_nir_lower_sample_intrinsics.c
src/asahi/lib/agx_nir_lower_tilebuffer.c
src/asahi/lib/agx_nir_lower_vbo.c
src/broadcom/compiler/nir_to_vir.c
src/broadcom/compiler/v3d_nir_lower_load_store_bitsize.c
src/compiler/nir/nir.h
src/compiler/nir/nir_gather_info.c
src/compiler/nir/nir_loop_analyze.c
src/compiler/nir/nir_lower_amul.c
src/compiler/nir/nir_lower_bool_to_bitsize.c
src/compiler/nir/nir_lower_flrp.c
src/compiler/nir/nir_lower_helper_writes.c
src/compiler/nir/nir_lower_image.c
src/compiler/nir/nir_lower_image_atomics_to_global.c
src/compiler/nir/nir_lower_int64.c
src/compiler/nir/nir_lower_io.c
src/compiler/nir/nir_lower_memcpy.c
src/compiler/nir/nir_lower_robust_access.c
src/compiler/nir/nir_lower_task_shader.c
src/compiler/nir/nir_lower_tex.c
src/compiler/nir/nir_lower_vec_to_regs.c
src/compiler/nir/nir_opt_if.c
src/compiler/nir/nir_opt_intrinsics.c
src/compiler/nir/nir_opt_phi_precision.c
src/compiler/nir/nir_opt_undef.c
src/compiler/nir/nir_split_64bit_vec3_and_vec4.c
src/compiler/nir/nir_validate.c
src/freedreno/ir3/ir3_compiler_nir.c
src/freedreno/ir3/ir3_image.c
src/freedreno/ir3/ir3_nir_analyze_ubo_ranges.c
src/freedreno/ir3/ir3_nir_lower_64b.c
src/freedreno/ir3/ir3_nir_lower_wide_load_store.c
src/gallium/auxiliary/gallivm/lp_bld_nir.c
src/gallium/auxiliary/nir/nir_to_tgsi.c
src/gallium/drivers/asahi/agx_nir_lower_sysvals.c
src/gallium/drivers/r600/sfn/sfn_nir_legalize_image_load_store.cpp
src/gallium/drivers/r600/sfn/sfn_nir_lower_64bit.cpp
src/gallium/drivers/radeonsi/si_shader_nir.c
src/gallium/drivers/zink/nir_to_spirv/nir_to_spirv.c
src/gallium/drivers/zink/zink_compiler.c
src/gallium/drivers/zink/zink_lower_cubemap_to_array.c
src/gallium/frontends/clover/nir/invocation.cpp
src/gallium/frontends/rusticl/rusticl_nir.c
src/intel/compiler/brw_fs_nir.cpp
src/intel/compiler/brw_mesh.cpp
src/intel/compiler/brw_nir_analyze_ubo_ranges.c
src/intel/compiler/brw_nir_blockify_uniform_loads.c
src/intel/compiler/brw_nir_lower_conversions.c
src/intel/compiler/brw_nir_lower_sparse.c
src/intel/compiler/brw_vec4_gs_nir.cpp
src/intel/compiler/brw_vec4_nir.cpp
src/intel/compiler/brw_vec4_tcs.cpp
src/intel/compiler/brw_vec4_tes.cpp
src/intel/vulkan/anv_nir_apply_pipeline_layout.c
src/intel/vulkan/anv_nir_push_descriptor_analysis.c
src/intel/vulkan_hasvk/anv_nir_lower_ycbcr_textures.c
src/microsoft/clc/clc_compiler.c
src/microsoft/clc/clc_nir.c
src/microsoft/compiler/dxil_nir.c
src/microsoft/compiler/nir_to_dxil.c
src/microsoft/spirv_to_dxil/dxil_spirv_nir.c
src/panfrost/compiler/bi_lower_divergent_indirects.c
src/panfrost/compiler/bifrost_compile.c
src/panfrost/midgard/midgard_compile.c
src/panfrost/midgard/nir_fuse_io_16.c
src/panfrost/util/pan_lower_64bit_intrin.c
src/panfrost/util/pan_lower_framebuffer.c
src/panfrost/util/pan_lower_sample_position.c
src/panfrost/vulkan/panvk_vX_shader.c
src/vulkan/runtime/vk_nir_convert_ycbcr.c

index e2d1bdb..5f6bf26 100644 (file)
@@ -3057,7 +3057,7 @@ static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *ins
          values[i] = ctx->args->workgroup_ids[i].used
                         ? ac_get_arg(&ctx->ac, ctx->args->workgroup_ids[i])
                         : ctx->ac.i32_0;
-         if (nir_dest_bit_size(instr->dest) == 64)
+         if (instr->dest.ssa.bit_size == 64)
             values[i] = LLVMBuildZExt(ctx->ac.builder, values[i], ctx->ac.i64, "");
       }
 
@@ -3156,7 +3156,7 @@ static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *ins
          result = ac_build_load_invariant(&ctx->ac,
             ac_get_ptr_arg(&ctx->ac, ctx->args, ctx->args->num_work_groups), ctx->ac.i32_0);
       }
-      if (nir_dest_bit_size(instr->dest) == 64)
+      if (instr->dest.ssa.bit_size == 64)
          result = LLVMBuildZExt(ctx->ac.builder, result, LLVMVectorType(ctx->ac.i64, 3), "");
       break;
    case nir_intrinsic_load_local_invocation_index:
index 8995e71..42f7162 100644 (file)
@@ -644,7 +644,7 @@ agx_load_compute_dimension(agx_builder *b, agx_index dst,
                            nir_intrinsic_instr *instr, enum agx_sr base)
 {
    unsigned dim = nir_dest_num_components(instr->dest);
-   unsigned size = nir_dest_bit_size(instr->dest);
+   unsigned size = instr->dest.ssa.bit_size;
    assert(size == 16 || size == 32);
 
    agx_index srcs[] = {
@@ -739,7 +739,7 @@ agx_emit_local_load(agx_builder *b, agx_index dst, nir_intrinsic_instr *instr)
    agx_index index = agx_zero(); /* TODO: optimize address arithmetic */
    assert(base.size == AGX_SIZE_16);
 
-   enum agx_format format = format_for_bitsize(nir_dest_bit_size(instr->dest));
+   enum agx_format format = format_for_bitsize(instr->dest.ssa.bit_size);
    unsigned nr = nir_dest_num_components(instr->dest);
    unsigned mask = BITFIELD_MASK(nr);
 
@@ -1226,7 +1226,7 @@ static agx_instr *
 agx_emit_alu(agx_builder *b, nir_alu_instr *instr)
 {
    unsigned srcs = nir_op_infos[instr->op].num_inputs;
-   unsigned sz = nir_dest_bit_size(instr->dest.dest);
+   unsigned sz = instr->dest.dest.ssa.bit_size;
    unsigned src_sz = srcs ? nir_src_bit_size(instr->src[0].src) : 0;
    ASSERTED unsigned comps = nir_dest_num_components(instr->dest.dest);
 
index 4a39d08..a082690 100644 (file)
@@ -270,7 +270,7 @@ pass(struct nir_builder *b, nir_instr *instr, UNUSED void *data)
 
    unsigned bitsize = intr->intrinsic == nir_intrinsic_store_global
                          ? nir_src_bit_size(intr->src[0])
-                         : nir_dest_bit_size(intr->dest);
+                         : intr->dest.ssa.bit_size;
    enum pipe_format format = format_for_bitsize(bitsize);
    unsigned format_shift = util_logbase2(util_format_get_blocksize(format));
 
@@ -312,7 +312,7 @@ pass(struct nir_builder *b, nir_instr *instr, UNUSED void *data)
    nir_def *repl = NULL;
    bool has_dest = (intr->intrinsic != nir_intrinsic_store_global);
    unsigned num_components = has_dest ? nir_dest_num_components(intr->dest) : 0;
-   unsigned bit_size = has_dest ? nir_dest_bit_size(intr->dest) : 0;
+   unsigned bit_size = has_dest ? intr->dest.ssa.bit_size : 0;
 
    if (intr->intrinsic == nir_intrinsic_load_global) {
       repl =
index f20ad84..2cd758b 100644 (file)
@@ -127,7 +127,7 @@ interpolate_channel(nir_builder *b, nir_intrinsic_instr *load, unsigned channel)
       .interp_mode = interp_mode_for_load(load), .io_semantics = sem);
 
    if (load->intrinsic == nir_intrinsic_load_input) {
-      assert(nir_dest_bit_size(load->dest) == 32);
+      assert(load->dest.ssa.bit_size == 32);
       return interpolate_flat(b, coefficients);
    } else {
       nir_intrinsic_instr *bary = nir_src_as_intrinsic(load->src[0]);
@@ -136,7 +136,7 @@ interpolate_channel(nir_builder *b, nir_intrinsic_instr *load, unsigned channel)
          b, coefficients, bary->src[0].ssa,
          nir_intrinsic_interp_mode(bary) != INTERP_MODE_NOPERSPECTIVE);
 
-      return nir_f2fN(b, interp, nir_dest_bit_size(load->dest));
+      return nir_f2fN(b, interp, load->dest.ssa.bit_size);
    }
 }
 
index ae1b299..572f942 100644 (file)
@@ -26,7 +26,7 @@ pass(struct nir_builder *b, nir_instr *instr, UNUSED void *data)
       return false;
 
    b->cursor = nir_before_instr(instr);
-   unsigned bit_size = nir_dest_bit_size(intr->dest);
+   unsigned bit_size = intr->dest.ssa.bit_size;
    nir_def *comps[4] = {NULL};
 
    for (unsigned c = 0; c < intr->num_components; ++c) {
index 6fede1a..ab38260 100644 (file)
@@ -206,8 +206,8 @@ load_rgb32(nir_builder *b, nir_tex_instr *tex, nir_def *coordinate)
       nir_iand_imm(b, nir_ushr_imm(b, desc_hi, 2), BITFIELD64_MASK(36));
    nir_def *base = nir_ishl_imm(b, base_shr4, 4);
 
-   nir_def *raw = nir_load_constant_agx(b, 3, nir_dest_bit_size(tex->dest),
-                                        base, nir_imul_imm(b, coordinate, 3),
+   nir_def *raw = nir_load_constant_agx(b, 3, tex->dest.ssa.bit_size, base,
+                                        nir_imul_imm(b, coordinate, 3),
                                         .format = AGX_INTERNAL_FORMAT_I32);
 
    /* Set alpha to 1 (in the appropriate format) */
@@ -745,7 +745,7 @@ lower_images(nir_builder *b, nir_instr *instr, UNUSED void *data)
       nir_def_rewrite_uses(
          &intr->dest.ssa,
          txs_for_image(b, intr, nir_dest_num_components(intr->dest),
-                       nir_dest_bit_size(intr->dest)));
+                       intr->dest.ssa.bit_size));
       return true;
 
    case nir_intrinsic_image_texel_address:
index 61a72d8..ded86c8 100644 (file)
@@ -24,9 +24,9 @@ pass(struct nir_builder *b, nir_instr *instr, UNUSED void *data)
    nir_def *offset = nir_ssa_for_src(b, *nir_get_io_offset_src(intr), 1);
    nir_def *address =
       nir_iadd(b, nir_load_ubo_base_agx(b, ubo_index), nir_u2u64(b, offset));
-   nir_def *value = nir_load_global_constant(
-      b, address, nir_intrinsic_align(intr), intr->num_components,
-      nir_dest_bit_size(intr->dest));
+   nir_def *value =
+      nir_load_global_constant(b, address, nir_intrinsic_align(intr),
+                               intr->num_components, intr->dest.ssa.bit_size);
 
    nir_def_rewrite_uses(&intr->dest.ssa, value);
    return true;
index 635e98a..aba8404 100644 (file)
@@ -20,7 +20,7 @@ lower_wrapped(nir_builder *b, nir_instr *instr, void *data)
 
    switch (intr->intrinsic) {
    case nir_intrinsic_load_sample_id: {
-      unsigned size = nir_dest_bit_size(intr->dest);
+      unsigned size = intr->dest.ssa.bit_size;
       nir_def_rewrite_uses(&intr->dest.ssa, nir_u2uN(b, sample_id, size));
       nir_instr_remove(instr);
       return true;
index 34ba027..154f80f 100644 (file)
@@ -52,7 +52,7 @@ lower_to_sample(nir_builder *b, nir_instr *instr, void *_)
          xy[i] = nir_fmul_imm(b, nir_u2f16(b, nibble), 1.0 / 16.0);
 
          /* Upconvert if necessary */
-         xy[i] = nir_f2fN(b, xy[i], nir_dest_bit_size(intr->dest));
+         xy[i] = nir_f2fN(b, xy[i], intr->dest.ssa.bit_size);
       }
 
       /* Collect and rewrite */
@@ -81,7 +81,7 @@ lower_to_sample(nir_builder *b, nir_instr *instr, void *_)
       nir_def *old = &intr->dest.ssa;
 
       nir_def *lowered = nir_load_barycentric_at_sample(
-         b, nir_dest_bit_size(intr->dest), nir_load_sample_id(b),
+         b, intr->dest.ssa.bit_size, nir_load_sample_id(b),
          .interp_mode = nir_intrinsic_interp_mode(intr));
 
       nir_def_rewrite_uses_after(old, lowered, lowered->parent_instr);
index c457f41..7b40c0a 100644 (file)
@@ -266,7 +266,7 @@ tib_impl(nir_builder *b, nir_instr *instr, void *data)
 
       return NIR_LOWER_INSTR_PROGRESS_REPLACE;
    } else {
-      uint8_t bit_size = nir_dest_bit_size(intr->dest);
+      uint8_t bit_size = intr->dest.ssa.bit_size;
 
       /* Loads from non-existent render targets are undefined in NIR but not
        * possible to encode in the hardware, delete them.
index eb0d120..3e80252 100644 (file)
@@ -149,7 +149,7 @@ pass(struct nir_builder *b, nir_instr *instr, void *data)
       util_format_is_pure_uint(interchange_format) &&
             !util_format_is_pure_uint(attrib.format)
          ? (interchange_align * 8)
-         : nir_dest_bit_size(intr->dest);
+         : intr->dest.ssa.bit_size;
 
    /* Non-UNORM R10G10B10A2 loaded as a scalar and unpacked */
    if (interchange_format == PIPE_FORMAT_R32_UINT && !desc->is_array)
@@ -190,7 +190,7 @@ pass(struct nir_builder *b, nir_instr *instr, void *data)
       b, interchange_comps, interchange_register_size, base, stride_offset_el,
       .format = interchange_format, .base = shift);
 
-   unsigned dest_size = nir_dest_bit_size(intr->dest);
+   unsigned dest_size = intr->dest.ssa.bit_size;
 
    /* Unpack but do not convert non-native non-array formats */
    if (is_rgb10_a2(desc) && interchange_format == PIPE_FORMAT_R32_UINT) {
index 0433741..b5e037a 100644 (file)
@@ -627,7 +627,7 @@ ntq_emit_tmu_general(struct v3d_compile *c, nir_intrinsic_instr *instr,
                                                        tmu_op, has_index,
                                                        &tmu_writes);
                 } else if (is_load) {
-                        type_size = nir_dest_bit_size(instr->dest) / 8;
+                        type_size = instr->dest.ssa.bit_size / 8;
                 }
 
                 /* For atomics we use 32bit except for CMPXCHG, that we need
@@ -2694,7 +2694,7 @@ static void
 ntq_emit_load_uniform(struct v3d_compile *c, nir_intrinsic_instr *instr)
 {
         /* We scalarize general TMU access for anything that is not 32-bit. */
-        assert(nir_dest_bit_size(instr->dest) == 32 ||
+        assert(instr->dest.ssa.bit_size == 32 ||
                instr->num_components == 1);
 
         /* Try to emit ldunif if possible, otherwise fallback to general TMU */
@@ -2726,7 +2726,7 @@ ntq_emit_inline_ubo_load(struct v3d_compile *c, nir_intrinsic_instr *instr)
                 return false;
 
         /* We scalarize general TMU access for anything that is not 32-bit */
-        assert(nir_dest_bit_size(instr->dest) == 32 ||
+        assert(instr->dest.ssa.bit_size == 32 ||
                instr->num_components == 1);
 
         if (nir_src_is_const(instr->src[1])) {
@@ -3108,7 +3108,7 @@ ntq_emit_load_unifa(struct v3d_compile *c, nir_intrinsic_instr *instr)
          * use ldunifa if we can verify alignment, which we can only do for
          * loads with a constant offset.
          */
-        uint32_t bit_size = nir_dest_bit_size(instr->dest);
+        uint32_t bit_size = instr->dest.ssa.bit_size;
         uint32_t value_skips = 0;
         if (bit_size < 32) {
                 if (dynamic_src) {
index 742a690..6602f08 100644 (file)
@@ -116,7 +116,7 @@ static bool
 lower_load_bitsize(nir_builder *b,
                    nir_intrinsic_instr *intr)
 {
-        uint32_t bit_size = nir_dest_bit_size(intr->dest);
+        uint32_t bit_size = intr->dest.ssa.bit_size;
         if (bit_size == 32)
                 return false;
 
index 9b56242..cc9ff70 100644 (file)
@@ -1083,12 +1083,6 @@ nir_src_is_divergent(nir_src src)
 }
 
 static inline unsigned
-nir_dest_bit_size(nir_dest dest)
-{
-   return dest.ssa.bit_size;
-}
-
-static inline unsigned
 nir_dest_num_components(nir_dest dest)
 {
    return dest.ssa.num_components;
index b07ff01..c391030 100644 (file)
@@ -853,9 +853,9 @@ gather_alu_info(nir_alu_instr *instr, nir_shader *shader)
          shader->info.bit_sizes_int |= nir_src_bit_size(instr->src[i].src);
    }
    if (nir_alu_type_get_base_type(info->output_type) == nir_type_float)
-      shader->info.bit_sizes_float |= nir_dest_bit_size(instr->dest.dest);
+      shader->info.bit_sizes_float |= instr->dest.dest.ssa.bit_size;
    else
-      shader->info.bit_sizes_int |= nir_dest_bit_size(instr->dest.dest);
+      shader->info.bit_sizes_int |= instr->dest.dest.ssa.bit_size;
 }
 
 static void
index c31c964..e344c04 100644 (file)
@@ -188,9 +188,9 @@ instr_cost(loop_info_state *state, nir_instr *instr,
    }
 
    if (alu->op == nir_op_flrp) {
-      if ((options->lower_flrp16 && nir_dest_bit_size(alu->dest.dest) == 16) ||
-          (options->lower_flrp32 && nir_dest_bit_size(alu->dest.dest) == 32) ||
-          (options->lower_flrp64 && nir_dest_bit_size(alu->dest.dest) == 64))
+      if ((options->lower_flrp16 && alu->dest.dest.ssa.bit_size == 16) ||
+          (options->lower_flrp32 && alu->dest.dest.ssa.bit_size == 32) ||
+          (options->lower_flrp64 && alu->dest.dest.ssa.bit_size == 64))
          cost *= 3;
    }
 
@@ -199,11 +199,11 @@ instr_cost(loop_info_state *state, nir_instr *instr,
     * There are no 64-bit ops that don't have a 64-bit thing as their
     * destination or first source.
     */
-   if (nir_dest_bit_size(alu->dest.dest) < 64 &&
+   if (alu->dest.dest.ssa.bit_size < 64 &&
        nir_src_bit_size(alu->src[0].src) < 64)
       return cost;
 
-   bool is_fp64 = nir_dest_bit_size(alu->dest.dest) == 64 &&
+   bool is_fp64 = alu->dest.dest.ssa.bit_size == 64 &&
                   nir_alu_type_get_base_type(info->output_type) == nir_type_float;
    for (unsigned i = 0; i < info->num_inputs; i++) {
       if (nir_src_bit_size(alu->src[i].src) == 64 &&
index 42b656e..ed773a1 100644 (file)
@@ -260,7 +260,7 @@ nir_lower_amul(nir_shader *shader,
             if (alu->op != nir_op_amul)
                continue;
 
-            if (nir_dest_bit_size(alu->dest.dest) <= 32)
+            if (alu->dest.dest.ssa.bit_size <= 32)
                alu->op = nir_op_imul24;
             else
                alu->op = nir_op_imul;
index 7e6cb9f..6511fac 100644 (file)
@@ -112,7 +112,7 @@ lower_alu_instr(nir_builder *b, nir_alu_instr *alu)
    case nir_op_iand:
    case nir_op_ior:
    case nir_op_ixor:
-      if (nir_dest_bit_size(alu->dest.dest) > 1)
+      if (alu->dest.dest.ssa.bit_size > 1)
          return false; /* Not a boolean instruction */
       FALLTHROUGH;
 
@@ -135,7 +135,7 @@ lower_alu_instr(nir_builder *b, nir_alu_instr *alu)
 
    case nir_op_bcsel:
       /* bcsel may be choosing between boolean sources too */
-      if (nir_dest_bit_size(alu->dest.dest) == 1)
+      if (alu->dest.dest.ssa.bit_size == 1)
          make_sources_canonical(b, alu, 1);
       break;
 
@@ -340,7 +340,7 @@ lower_load_const_instr(nir_load_const_instr *load)
 static bool
 lower_phi_instr(nir_builder *b, nir_phi_instr *phi)
 {
-   if (nir_dest_bit_size(phi->dest) != 1)
+   if (phi->dest.ssa.bit_size != 1)
       return false;
 
    /* Ensure all phi sources have a canonical bit-size. We choose the
index d081bf0..f9f8302 100644 (file)
@@ -367,7 +367,7 @@ convert_flrp_instruction(nir_builder *bld,
                          bool always_precise)
 {
    bool have_ffma = false;
-   unsigned bit_size = nir_dest_bit_size(alu->dest.dest);
+   unsigned bit_size = alu->dest.dest.ssa.bit_size;
 
    if (bit_size == 16)
       have_ffma = !bld->shader->options->lower_ffma16;
index c59489c..eded211 100644 (file)
@@ -77,7 +77,7 @@ lower(nir_builder *b, nir_instr *instr, void *data)
    if (has_dest) {
       nir_push_else(b, NULL);
       undef = nir_undef(b, nir_dest_num_components(intr->dest),
-                        nir_dest_bit_size(intr->dest));
+                        intr->dest.ssa.bit_size);
    }
 
    nir_pop_if(b, NULL);
index 9ff710f..e6e6489 100644 (file)
@@ -206,7 +206,7 @@ lower_image_instr(nir_builder *b, nir_instr *instr, void *state)
    case nir_intrinsic_bindless_image_samples: {
       if (options->lower_image_samples_to_one) {
          b->cursor = nir_after_instr(&intrin->instr);
-         nir_def *samples = nir_imm_intN_t(b, 1, nir_dest_bit_size(intrin->dest));
+         nir_def *samples = nir_imm_intN_t(b, 1, intrin->dest.ssa.bit_size);
          nir_def_rewrite_uses(&intrin->dest.ssa, samples);
          return true;
       }
index d469083..45f1570 100644 (file)
@@ -43,7 +43,7 @@ lower(nir_builder *b, nir_instr *instr, UNUSED void *_)
    b->cursor = nir_before_instr(instr);
    nir_atomic_op atomic_op = nir_intrinsic_atomic_op(intr);
    enum pipe_format format = nir_intrinsic_format(intr);
-   unsigned bit_size = nir_dest_bit_size(intr->dest);
+   unsigned bit_size = intr->dest.ssa.bit_size;
 
    /* Even for "formatless" access, we know the size of the texel accessed,
     * since it's the size of the atomic. We can use that to synthesize a
index 3fd60d5..b9d322d 100644 (file)
@@ -1060,11 +1060,11 @@ lower_int64_alu_instr(nir_builder *b, nir_alu_instr *alu)
    case nir_op_i2f64:
    case nir_op_i2f32:
    case nir_op_i2f16:
-      return lower_2f(b, src[0], nir_dest_bit_size(alu->dest.dest), true);
+      return lower_2f(b, src[0], alu->dest.dest.ssa.bit_size, true);
    case nir_op_u2f64:
    case nir_op_u2f32:
    case nir_op_u2f16:
-      return lower_2f(b, src[0], nir_dest_bit_size(alu->dest.dest), false);
+      return lower_2f(b, src[0], alu->dest.dest.ssa.bit_size, false);
    case nir_op_f2i64:
    case nir_op_f2u64:
       return lower_f2(b, src[0], alu->op == nir_op_f2i64);
index 604018c..b2989ef 100644 (file)
@@ -537,7 +537,7 @@ lower_interpolate_at(nir_intrinsic_instr *intrin, struct lower_io_state *state,
    }
 
    /* None of the supported APIs allow interpolation on 64-bit things */
-   assert(nir_dest_bit_size(intrin->dest) <= 32);
+   assert(intrin->dest.ssa.bit_size <= 32);
 
    nir_intrinsic_op bary_op;
    switch (intrin->intrinsic) {
@@ -2844,8 +2844,8 @@ is_dual_slot(nir_intrinsic_instr *intrin)
              nir_src_num_components(intrin->src[0]) >= 3;
    }
 
-   return nir_dest_bit_size(intrin->dest) == 64 &&
-          nir_dest_num_components(intrin->dest) >= 3;
+   return intrin->dest.ssa.bit_size == 64 &&
+   nir_dest_num_components(intrin->dest) >= 3;
 }
 
 /**
index ecf2cac..31f27c8 100644 (file)
@@ -55,7 +55,7 @@ memcpy_load_deref_elem(nir_builder *b, nir_deref_instr *parent,
 {
    nir_deref_instr *deref;
 
-   index = nir_i2iN(b, index, nir_dest_bit_size(parent->dest));
+   index = nir_i2iN(b, index, parent->dest.ssa.bit_size);
    assert(parent->deref_type == nir_deref_type_cast);
    deref = nir_build_deref_ptr_as_array(b, parent, index);
 
@@ -76,7 +76,7 @@ memcpy_store_deref_elem(nir_builder *b, nir_deref_instr *parent,
 {
    nir_deref_instr *deref;
 
-   index = nir_i2iN(b, index, nir_dest_bit_size(parent->dest));
+   index = nir_i2iN(b, index, parent->dest.ssa.bit_size);
    assert(parent->deref_type == nir_deref_type_cast);
    deref = nir_build_deref_ptr_as_array(b, parent, index);
    nir_store_deref(b, deref, value, ~0);
index 26394e8..2cf291e 100644 (file)
@@ -63,7 +63,7 @@ lower_buffer_load(nir_builder *b,
                   nir_intrinsic_instr *instr,
                   const nir_lower_robust_access_options *opts)
 {
-   uint32_t type_sz = nir_dest_bit_size(instr->dest) / 8;
+   uint32_t type_sz = instr->dest.ssa.bit_size / 8;
    nir_def *size;
    nir_def *index = instr->src[0].ssa;
 
@@ -96,7 +96,7 @@ lower_buffer_shared(nir_builder *b, nir_intrinsic_instr *instr)
    uint32_t type_sz, offset_src;
    if (instr->intrinsic == nir_intrinsic_load_shared) {
       offset_src = 0;
-      type_sz = nir_dest_bit_size(instr->dest) / 8;
+      type_sz = instr->dest.ssa.bit_size / 8;
    } else if (instr->intrinsic == nir_intrinsic_store_shared) {
       offset_src = 1;
       type_sz = nir_src_bit_size(instr->src[0]) / 8;
index 9c3ff55..1b4ef78 100644 (file)
@@ -376,7 +376,7 @@ requires_payload_in_shared(nir_shader *shader, bool atomics, bool small_types)
                   return true;
                break;
             case nir_intrinsic_load_task_payload:
-               if (small_types && nir_dest_bit_size(intrin->dest) < 32)
+               if (small_types && intrin->dest.ssa.bit_size < 32)
                   return true;
                break;
             case nir_intrinsic_store_task_payload:
index ac84d39..fff37bf 100644 (file)
@@ -315,14 +315,14 @@ sample_plane(nir_builder *b, nir_tex_instr *tex, int plane,
                                                        nir_imm_int(b, plane));
    plane_tex->op = nir_texop_tex;
    plane_tex->sampler_dim = GLSL_SAMPLER_DIM_2D;
-   plane_tex->dest_type = nir_type_float | nir_dest_bit_size(tex->dest);
+   plane_tex->dest_type = nir_type_float | tex->dest.ssa.bit_size;
    plane_tex->coord_components = 2;
 
    plane_tex->texture_index = tex->texture_index;
    plane_tex->sampler_index = tex->sampler_index;
 
    nir_def_init(&plane_tex->instr, &plane_tex->dest.ssa, 4,
-                nir_dest_bit_size(tex->dest));
+                tex->dest.ssa.bit_size);
 
    nir_builder_instr_insert(b, &plane_tex->instr);
 
@@ -369,7 +369,7 @@ convert_yuv_to_rgb(nir_builder *b, nir_tex_instr *tex,
       }
    }
 
-   unsigned bit_size = nir_dest_bit_size(tex->dest);
+   unsigned bit_size = tex->dest.ssa.bit_size;
 
    nir_def *offset =
       nir_vec4(b,
@@ -888,7 +888,7 @@ lower_tex_to_txd(nir_builder *b, nir_tex_instr *tex)
 
    nir_def_init(&txd->instr, &txd->dest.ssa,
                 nir_dest_num_components(tex->dest),
-                nir_dest_bit_size(tex->dest));
+                tex->dest.ssa.bit_size);
    nir_builder_instr_insert(b, &txd->instr);
    nir_def_rewrite_uses(&tex->dest.ssa, &txd->dest.ssa);
    nir_instr_remove(&tex->instr);
@@ -928,7 +928,7 @@ lower_txb_to_txl(nir_builder *b, nir_tex_instr *tex)
 
    nir_def_init(&txl->instr, &txl->dest.ssa,
                 nir_dest_num_components(tex->dest),
-                nir_dest_bit_size(tex->dest));
+                tex->dest.ssa.bit_size);
    nir_builder_instr_insert(b, &txl->instr);
    nir_def_rewrite_uses(&tex->dest.ssa, &txl->dest.ssa);
    nir_instr_remove(&tex->instr);
index 226d2ef..6d111b7 100644 (file)
@@ -174,7 +174,7 @@ try_coalesce(nir_builder *b, nir_def *reg, nir_alu_instr *vec,
    /* ... so we can replace it with the bigger destination accommodating the
     * whole vector that will be masked for the store.
     */
-   unsigned bit_size = nir_dest_bit_size(vec->dest.dest);
+   unsigned bit_size = vec->dest.dest.ssa.bit_size;
    assert(bit_size == src_alu->dest.dest.ssa.bit_size);
    nir_def_init(&src_alu->instr, &src_alu->dest.dest.ssa, dest_components,
                 bit_size);
@@ -214,7 +214,7 @@ lower(nir_builder *b, nir_instr *instr, void *data_)
    if (need_reg) {
       /* We'll replace with a register. Declare one for the purpose. */
       nir_def *reg = nir_decl_reg(b, num_components,
-                                  nir_dest_bit_size(vec->dest.dest), 0);
+                                  vec->dest.dest.ssa.bit_size, 0);
 
       unsigned finished_write_mask = 0;
       for (unsigned i = 0; i < num_components; i++) {
index 8fd07ac..c61757a 100644 (file)
@@ -665,7 +665,7 @@ opt_simplify_bcsel_of_phi(nir_builder *b, nir_loop *loop)
 
       nir_def_init(&phi->instr, &phi->dest.ssa,
                    nir_dest_num_components(bcsel->dest.dest),
-                   nir_dest_bit_size(bcsel->dest.dest));
+                   bcsel->dest.dest.ssa.bit_size);
 
       b->cursor = nir_after_phis(header_block);
       nir_builder_instr_insert(b, &phi->instr);
index d004d2c..541842c 100644 (file)
@@ -223,7 +223,7 @@ opt_intrinsics_alu(nir_builder *b, nir_alu_instr *alu,
       break;
    case nir_op_iand:
    case nir_op_ior:
-      if (nir_dest_bit_size(alu->dest.dest) == 1 && options->optimize_quad_vote_to_reduce)
+      if (alu->dest.dest.ssa.bit_size == 1 && options->optimize_quad_vote_to_reduce)
          replacement = try_opt_quad_vote(b, alu, block_has_discard);
       break;
    default:
index a232d2c..31e809f 100644 (file)
@@ -183,7 +183,7 @@ widening_conversion_op(nir_instr *instr, unsigned *bit_size)
    /* We also need to check that the conversion's dest was actually
     * wider:
     */
-   if (nir_dest_bit_size(alu->dest.dest) <= *bit_size)
+   if (alu->dest.dest.ssa.bit_size <= *bit_size)
       return INVALID_OP;
 
    return alu->op;
index b65a004..4e57324 100644 (file)
@@ -82,7 +82,7 @@ opt_undef_vecN(nir_builder *b, nir_alu_instr *alu)
 
    b->cursor = nir_before_instr(&alu->instr);
    nir_def *undef = nir_undef(b, alu->dest.dest.ssa.num_components,
-                              nir_dest_bit_size(alu->dest.dest));
+                              alu->dest.dest.ssa.bit_size);
    nir_def_rewrite_uses(&alu->dest.dest.ssa, undef);
 
    return true;
index 4a53574..74d3a91 100644 (file)
@@ -48,7 +48,7 @@ nir_split_64bit_vec3_and_vec4_filter(const nir_instr *instr,
 
       switch (intr->intrinsic) {
       case nir_intrinsic_load_deref: {
-         if (nir_dest_bit_size(intr->dest) != 64)
+         if (intr->dest.ssa.bit_size != 64)
             return false;
          nir_variable *var = nir_intrinsic_get_var(intr, 0);
          if (var->data.mode != nir_var_function_temp)
@@ -69,7 +69,7 @@ nir_split_64bit_vec3_and_vec4_filter(const nir_instr *instr,
    }
    case nir_instr_type_phi: {
       nir_phi_instr *phi = nir_instr_as_phi(instr);
-      if (nir_dest_bit_size(phi->dest) != 64)
+      if (phi->dest.ssa.bit_size != 64)
          return false;
       return nir_dest_num_components(phi->dest) >= 3;
    }
index 0e51d03..bb99612 100644 (file)
@@ -248,7 +248,7 @@ validate_alu_instr(nir_alu_instr *instr, validate_state *state)
    }
 
    nir_alu_type dest_type = nir_op_infos[instr->op].output_type;
-   unsigned dest_bit_size = nir_dest_bit_size(instr->dest.dest);
+   unsigned dest_bit_size = instr->dest.dest.ssa.bit_size;
    if (nir_alu_type_get_type_size(dest_type)) {
       validate_assert(state, dest_bit_size == nir_alu_type_get_type_size(dest_type));
    } else if (instr_bit_size) {
@@ -316,7 +316,7 @@ validate_deref_instr(nir_deref_instr *instr, validate_state *state)
       /* The parent pointer value must have the same number of components
        * as the destination.
        */
-      validate_src(&instr->parent, state, nir_dest_bit_size(instr->dest),
+      validate_src(&instr->parent, state, instr->dest.ssa.bit_size,
                    nir_dest_num_components(instr->dest));
 
       nir_instr *parent_instr = instr->parent.ssa->parent_instr;
@@ -357,7 +357,7 @@ validate_deref_instr(nir_deref_instr *instr, validate_state *state)
 
          if (instr->deref_type == nir_deref_type_array) {
             validate_src(&instr->arr.index, state,
-                         nir_dest_bit_size(instr->dest), 1);
+                         instr->dest.ssa.bit_size, 1);
          }
          break;
 
@@ -371,7 +371,7 @@ validate_deref_instr(nir_deref_instr *instr, validate_state *state)
                             parent->deref_type == nir_deref_type_ptr_as_array ||
                             parent->deref_type == nir_deref_type_cast);
          validate_src(&instr->arr.index, state,
-                      nir_dest_bit_size(instr->dest), 1);
+                      instr->dest.ssa.bit_size, 1);
          break;
 
       default:
@@ -478,7 +478,7 @@ validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state)
    case nir_intrinsic_load_reg_indirect:
       validate_register_handle(instr->src[0],
                                nir_dest_num_components(instr->dest),
-                               nir_dest_bit_size(instr->dest), state);
+                               instr->dest.ssa.bit_size, state);
       break;
 
    case nir_intrinsic_store_reg:
@@ -552,7 +552,7 @@ validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state)
    }
 
    case nir_intrinsic_load_ubo_vec4: {
-      int bit_size = nir_dest_bit_size(instr->dest);
+      int bit_size = instr->dest.ssa.bit_size;
       validate_assert(state, bit_size >= 8);
       validate_assert(state, (nir_intrinsic_component(instr) +
                               instr->num_components) *
@@ -587,7 +587,7 @@ validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state)
    case nir_intrinsic_load_per_primitive_output:
    case nir_intrinsic_load_push_constant:
       /* All memory load operations must load at least a byte */
-      validate_assert(state, nir_dest_bit_size(instr->dest) >= 8);
+      validate_assert(state, instr->dest.ssa.bit_size >= 8);
       break;
 
    case nir_intrinsic_store_ssbo:
@@ -645,7 +645,7 @@ validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state)
          }
 
          validate_assert(state, allowed);
-         validate_assert(state, nir_dest_bit_size(instr->dest) ==
+         validate_assert(state, instr->dest.ssa.bit_size ==
                                    util_format_get_blocksizebits(format));
       }
       break;
@@ -852,7 +852,7 @@ validate_tex_instr(nir_tex_instr *instr, validate_state *state)
    unsigned bit_size = nir_alu_type_get_type_size(instr->dest_type);
    validate_assert(state,
                    (bit_size ? bit_size : 32) ==
-                      nir_dest_bit_size(instr->dest));
+                      instr->dest.ssa.bit_size);
 }
 
 static void
index 5d2a463..9321e6b 100644 (file)
@@ -378,7 +378,7 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu)
    unsigned bs[info->num_inputs]; /* bit size */
    struct ir3_block *b = ctx->block;
    unsigned dst_sz, wrmask;
-   type_t dst_type = type_uint_size(nir_dest_bit_size(alu->dest.dest));
+   type_t dst_type = type_uint_size(alu->dest.dest.ssa.bit_size);
 
    dst_sz = alu->dest.dest.ssa.num_components;
    wrmask = (1 << dst_sz) - 1;
@@ -645,7 +645,7 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu)
       dst[0] = ir3_MAD_S24(b, src[0], 0, src[1], 0, src[2], 0);
       break;
    case nir_op_imul:
-      compile_assert(ctx, nir_dest_bit_size(alu->dest.dest) == 16);
+      compile_assert(ctx, alu->dest.dest.ssa.bit_size == 16);
       dst[0] = ir3_MUL_S24(b, src[0], 0, src[1], 0);
       break;
    case nir_op_imul24:
@@ -843,7 +843,7 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu)
    }
 
    if (nir_alu_type_get_base_type(info->output_type) == nir_type_bool) {
-      assert(nir_dest_bit_size(alu->dest.dest) == 1 || alu->op == nir_op_b2b32);
+      assert(alu->dest.dest.ssa.bit_size == 1 || alu->op == nir_op_b2b32);
       assert(dst_sz == 1);
    } else {
       /* 1-bit values stored in 32-bit registers are only valid for certain
@@ -857,7 +857,7 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu)
       case nir_op_bcsel:
          break;
       default:
-         compile_assert(ctx, nir_dest_bit_size(alu->dest.dest) != 1);
+         compile_assert(ctx, alu->dest.dest.ssa.bit_size != 1);
       }
    }
 
@@ -1484,7 +1484,7 @@ emit_intrinsic_image_size_tex(struct ir3_context *ctx,
    struct tex_src_info info = get_image_ssbo_samp_tex_src(ctx, &intr->src[0], true);
    struct ir3_instruction *sam, *lod;
    unsigned flags, ncoords = ir3_get_image_coords(intr, &flags);
-   type_t dst_type = nir_dest_bit_size(intr->dest) == 16 ? TYPE_U16 : TYPE_U32;
+   type_t dst_type = intr->dest.ssa.bit_size == 16 ? TYPE_U16 : TYPE_U32;
 
    info.flags |= flags;
    assert(nir_src_as_uint(intr->src[1]) == 0);
@@ -1902,7 +1902,7 @@ emit_intrinsic_reduce(struct ir3_context *ctx, nir_intrinsic_instr *intr)
    struct ir3_instruction *src = ir3_get_src(ctx, &intr->src[0])[0];
    nir_op nir_reduce_op = (nir_op) nir_intrinsic_reduction_op(intr);
    reduce_op_t reduce_op = get_reduce_op(nir_reduce_op);
-   unsigned dst_size = nir_dest_bit_size(intr->dest);
+   unsigned dst_size = intr->dest.ssa.bit_size;
    unsigned flags = (ir3_bitsize(ctx, dst_size) == 16) ? IR3_REG_HALF : 0;
 
    /* Note: the shared reg is initialized to the identity, so we need it to
@@ -2046,14 +2046,14 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
          for (int i = 0; i < dest_components; i++) {
             dst[i] = create_uniform_typed(
                b, idx + i,
-               nir_dest_bit_size(intr->dest) == 16 ? TYPE_F16 : TYPE_F32);
+               intr->dest.ssa.bit_size == 16 ? TYPE_F16 : TYPE_F32);
          }
       } else {
          src = ir3_get_src(ctx, &intr->src[0]);
          for (int i = 0; i < dest_components; i++) {
             dst[i] = create_uniform_indirect(
                b, idx + i,
-               nir_dest_bit_size(intr->dest) == 16 ? TYPE_F16 : TYPE_F32,
+               intr->dest.ssa.bit_size == 16 ? TYPE_F16 : TYPE_F32,
                ir3_get_addr0(ctx, src[0], 1));
          }
          /* NOTE: if relative addressing is used, we set
@@ -2567,7 +2567,7 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
       struct ir3_instruction *src = ir3_get_src(ctx, &intr->src[0])[0];
       struct ir3_instruction *idx = ir3_get_src(ctx, &intr->src[1])[0];
 
-      type_t dst_type = type_uint_size(nir_dest_bit_size(intr->dest));
+      type_t dst_type = type_uint_size(intr->dest.ssa.bit_size);
 
       if (dst_type != TYPE_U32)
          idx = ir3_COV(ctx->block, idx, TYPE_U32, dst_type);
@@ -2580,21 +2580,21 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
    case nir_intrinsic_quad_swap_horizontal: {
       struct ir3_instruction *src = ir3_get_src(ctx, &intr->src[0])[0];
       dst[0] = ir3_QUAD_SHUFFLE_HORIZ(ctx->block, src, 0);
-      dst[0]->cat5.type = type_uint_size(nir_dest_bit_size(intr->dest));
+      dst[0]->cat5.type = type_uint_size(intr->dest.ssa.bit_size);
       break;
    }
 
    case nir_intrinsic_quad_swap_vertical: {
       struct ir3_instruction *src = ir3_get_src(ctx, &intr->src[0])[0];
       dst[0] = ir3_QUAD_SHUFFLE_VERT(ctx->block, src, 0);
-      dst[0]->cat5.type = type_uint_size(nir_dest_bit_size(intr->dest));
+      dst[0]->cat5.type = type_uint_size(intr->dest.ssa.bit_size);
       break;
    }
 
    case nir_intrinsic_quad_swap_diagonal: {
       struct ir3_instruction *src = ir3_get_src(ctx, &intr->src[0])[0];
       dst[0] = ir3_QUAD_SHUFFLE_DIAG(ctx->block, src, 0);
-      dst[0]->cat5.type = type_uint_size(nir_dest_bit_size(intr->dest));
+      dst[0]->cat5.type = type_uint_size(intr->dest.ssa.bit_size);
       break;
    }
 
@@ -3284,7 +3284,7 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
 
    /* GETLOD returns results in 4.8 fixed point */
    if (opc == OPC_GETLOD) {
-      bool half = nir_dest_bit_size(tex->dest) == 16;
+      bool half = tex->dest.ssa.bit_size == 16;
       struct ir3_instruction *factor =
          half ? create_immed_typed(b, _mesa_float_to_half(1.0 / 256), TYPE_F16)
               : create_immed(b, fui(1.0 / 256));
index 9e73248..b868329 100644 (file)
@@ -116,7 +116,7 @@ type_t
 ir3_get_type_for_image_intrinsic(const nir_intrinsic_instr *instr)
 {
    const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
-   int bit_size = info->has_dest ? nir_dest_bit_size(instr->dest) : nir_src_bit_size(instr->src[3]);
+   int bit_size = info->has_dest ? instr->dest.ssa.bit_size : nir_src_bit_size(instr->src[3]);
 
    nir_alu_type type = nir_type_uint;
    switch (instr->intrinsic) {
index f2e896e..9408f5c 100644 (file)
@@ -594,7 +594,7 @@ ir3_nir_lower_load_const_instr(nir_builder *b, nir_instr *in_instr, void *data)
    }
 
    unsigned num_components = instr->num_components;
-   if (nir_dest_bit_size(instr->dest) == 16) {
+   if (instr->dest.ssa.bit_size == 16) {
       /* We can't do 16b loads -- either from LDC (32-bit only in any of our
        * traces, and disasm that doesn't look like it really supports it) or
        * from the constant file (where CONSTANT_DEMOTION_ENABLE means we get
@@ -614,7 +614,7 @@ ir3_nir_lower_load_const_instr(nir_builder *b, nir_instr *in_instr, void *data)
                    .align_offset = nir_intrinsic_align_offset(instr),
                    .range_base = base, .range = nir_intrinsic_range(instr));
 
-   if (nir_dest_bit_size(instr->dest) == 16) {
+   if (instr->dest.ssa.bit_size == 16) {
       result = nir_bitcast_vector(b, result, 16);
       result = nir_trim_vector(b, result, instr->num_components);
    }
index fd5e5f4..6037fd1 100644 (file)
@@ -50,7 +50,7 @@ lower_64b_intrinsics_filter(const nir_instr *instr, const void *unused)
    if (nir_intrinsic_dest_components(intr) == 0)
       return false;
 
-   return nir_dest_bit_size(intr->dest) == 64;
+   return intr->dest.ssa.bit_size == 64;
 }
 
 static nir_def *
@@ -257,12 +257,12 @@ lower_64b_global(nir_builder *b, nir_instr *instr, void *unused)
 
    if (intr->intrinsic == nir_intrinsic_global_atomic) {
       return nir_global_atomic_ir3(
-            b, nir_dest_bit_size(intr->dest), addr,
+            b, intr->dest.ssa.bit_size, addr,
             nir_ssa_for_src(b, intr->src[1], 1),
          .atomic_op = nir_intrinsic_atomic_op(intr));
    } else if (intr->intrinsic == nir_intrinsic_global_atomic_swap) {
       return nir_global_atomic_swap_ir3(
-         b, nir_dest_bit_size(intr->dest), addr,
+         b, intr->dest.ssa.bit_size, addr,
          nir_ssa_for_src(b, intr->src[1], 1),
          nir_ssa_for_src(b, intr->src[2], 1),
          .atomic_op = nir_intrinsic_atomic_op(intr));
@@ -274,7 +274,7 @@ lower_64b_global(nir_builder *b, nir_instr *instr, void *unused)
       for (unsigned off = 0; off < num_comp;) {
          unsigned c = MIN2(num_comp - off, 4);
          nir_def *val = nir_load_global_ir3(
-               b, c, nir_dest_bit_size(intr->dest),
+               b, c, intr->dest.ssa.bit_size,
                addr, nir_imm_int(b, off));
          for (unsigned i = 0; i < c; i++) {
             components[off++] = nir_channel(b, val, i);
index b0c8724..67655c0 100644 (file)
@@ -81,7 +81,7 @@ lower_wide_load_store(nir_builder *b, nir_instr *instr, void *unused)
       return NIR_LOWER_INSTR_PROGRESS_REPLACE;
    } else {
       unsigned num_comp = nir_intrinsic_dest_components(intr);
-      unsigned bit_size = nir_dest_bit_size(intr->dest);
+      unsigned bit_size = intr->dest.ssa.bit_size;
       nir_def *addr = nir_ssa_for_src(b, intr->src[0], 1);
       nir_def *components[num_comp];
 
index dd3cb82..fcff806 100644 (file)
@@ -1197,7 +1197,7 @@ visit_alu(struct lp_build_nir_context *bld_base,
          result[c] = do_alu_action(bld_base, instr, src_bit_size, src_chan);
          result[c] = cast_type(bld_base, result[c],
                                nir_op_infos[instr->op].output_type,
-                               nir_dest_bit_size(instr->dest.dest));
+                               instr->dest.dest.ssa.bit_size);
       }
    }
    assign_ssa_dest(bld_base, &instr->dest.dest.ssa, result);
@@ -1297,7 +1297,7 @@ visit_load_input(struct lp_build_nir_context *bld_base,
    var.data.location_frac = nir_intrinsic_component(instr);
 
    unsigned nc = nir_dest_num_components(instr->dest);
-   unsigned bit_size = nir_dest_bit_size(instr->dest);
+   unsigned bit_size = instr->dest.ssa.bit_size;
 
    nir_src offset = *nir_get_io_offset_src(instr);
    bool indirect = !nir_src_is_const(offset);
@@ -1436,7 +1436,7 @@ visit_load_var(struct lp_build_nir_context *bld_base,
    LLVMValueRef indir_vertex_index = NULL;
    unsigned vertex_index = 0;
    unsigned nc = nir_dest_num_components(instr->dest);
-   unsigned bit_size = nir_dest_bit_size(instr->dest);
+   unsigned bit_size = instr->dest.ssa.bit_size;
    if (var) {
       bool vs_in = bld_base->shader->info.stage == MESA_SHADER_VERTEX &&
          var->data.mode == nir_var_shader_in;
@@ -1522,7 +1522,7 @@ visit_load_ubo(struct lp_build_nir_context *bld_base,
       idx = LLVMBuildExtractElement(builder, idx, lp_build_const_int32(gallivm, 0), "");
 
    bld_base->load_ubo(bld_base, nir_dest_num_components(instr->dest),
-                      nir_dest_bit_size(instr->dest),
+                      instr->dest.ssa.bit_size,
                       offset_is_uniform, idx, offset, result);
 }
 
@@ -1538,7 +1538,7 @@ visit_load_push_constant(struct lp_build_nir_context *bld_base,
    bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
 
    bld_base->load_ubo(bld_base, nir_dest_num_components(instr->dest),
-                      nir_dest_bit_size(instr->dest),
+                      instr->dest.ssa.bit_size,
                       offset_is_uniform, idx, offset, result);
 }
 
@@ -1557,7 +1557,7 @@ visit_load_ssbo(struct lp_build_nir_context *bld_base,
       nir_src_is_always_uniform(instr->src[0]) &&
       nir_src_is_always_uniform(instr->src[1]);
    bld_base->load_mem(bld_base, nir_dest_num_components(instr->dest),
-                      nir_dest_bit_size(instr->dest),
+                      instr->dest.ssa.bit_size,
                       index_and_offset_are_uniform, false, idx, offset, result);
 }
 
@@ -1882,7 +1882,7 @@ visit_shared_load(struct lp_build_nir_context *bld_base,
    LLVMValueRef offset = get_src(bld_base, instr->src[0]);
    bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
    bld_base->load_mem(bld_base, nir_dest_num_components(instr->dest),
-                      nir_dest_bit_size(instr->dest),
+                      instr->dest.ssa.bit_size,
                       offset_is_uniform, false, NULL, offset, result);
 }
 
@@ -1958,7 +1958,7 @@ visit_load_kernel_input(struct lp_build_nir_context *bld_base,
 
    bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
    bld_base->load_kernel_arg(bld_base, nir_dest_num_components(instr->dest),
-                             nir_dest_bit_size(instr->dest),
+                             instr->dest.ssa.bit_size,
                              nir_src_bit_size(instr->src[0]),
                              offset_is_uniform, offset, result);
 }
@@ -1972,7 +1972,7 @@ visit_load_global(struct lp_build_nir_context *bld_base,
    LLVMValueRef addr = get_src(bld_base, instr->src[0]);
    bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
    bld_base->load_global(bld_base, nir_dest_num_components(instr->dest),
-                         nir_dest_bit_size(instr->dest),
+                         instr->dest.ssa.bit_size,
                          nir_src_bit_size(instr->src[0]),
                          offset_is_uniform, addr, result);
 }
@@ -2068,7 +2068,7 @@ visit_load_scratch(struct lp_build_nir_context *bld_base,
    LLVMValueRef offset = get_src(bld_base, instr->src[0]);
 
    bld_base->load_scratch(bld_base, nir_dest_num_components(instr->dest),
-                          nir_dest_bit_size(instr->dest), offset, result);
+                          instr->dest.ssa.bit_size, offset, result);
 }
 
 
@@ -2092,7 +2092,7 @@ visit_payload_load(struct lp_build_nir_context *bld_base,
    LLVMValueRef offset = get_src(bld_base, instr->src[0]);
    bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
    bld_base->load_mem(bld_base, nir_dest_num_components(instr->dest),
-                      nir_dest_bit_size(instr->dest),
+                      instr->dest.ssa.bit_size,
                       offset_is_uniform, true, NULL, offset, result);
 }
 
@@ -2668,8 +2668,8 @@ visit_tex(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
    params.sampler_resource = sampler_resource;
    bld_base->tex(bld_base, &params);
 
-   if (nir_dest_bit_size(instr->dest) != 32) {
-      assert(nir_dest_bit_size(instr->dest) == 16);
+   if (instr->dest.ssa.bit_size != 32) {
+      assert(instr->dest.ssa.bit_size == 16);
       LLVMTypeRef vec_type = NULL;
       bool is_float = false;
       switch (nir_alu_type_get_base_type(instr->dest_type)) {
index 256f5d7..f895392 100644 (file)
@@ -1442,7 +1442,7 @@ ntt_emit_alu(struct ntt_compile *c, nir_alu_instr *instr)
    struct ureg_src src[4];
    struct ureg_dst dst;
    unsigned i;
-   int dst_64 = nir_dest_bit_size(instr->dest.dest) == 64;
+   int dst_64 = instr->dest.dest.ssa.bit_size == 64;
    int src_64 = nir_src_bit_size(instr->src[0].src) == 64;
    int num_srcs = nir_op_infos[instr->op].num_inputs;
 
@@ -1879,7 +1879,7 @@ ntt_shift_by_frac(struct ureg_src src, unsigned frac, unsigned num_components)
 static void
 ntt_emit_load_ubo(struct ntt_compile *c, nir_intrinsic_instr *instr)
 {
-   int bit_size = nir_dest_bit_size(instr->dest);
+   int bit_size = instr->dest.ssa.bit_size;
    assert(bit_size == 32 || instr->num_components <= 2);
 
    struct ureg_src src = ureg_src_register(TGSI_FILE_CONSTANT, 0);
@@ -2234,7 +2234,7 @@ ntt_emit_load_input(struct ntt_compile *c, nir_intrinsic_instr *instr)
    unsigned base = nir_intrinsic_base(instr);
    struct ureg_src input;
    nir_io_semantics semantics = nir_intrinsic_io_semantics(instr);
-   bool is_64 = nir_dest_bit_size(instr->dest) == 64;
+   bool is_64 = instr->dest.ssa.bit_size == 64;
 
    if (c->s->info.stage == MESA_SHADER_VERTEX) {
       input = ureg_DECL_vs_input(c->ureg, base);
@@ -3226,7 +3226,7 @@ ntt_should_vectorize_instr(const nir_instr *instr, const void *data)
    }
 
    int src_bit_size = nir_src_bit_size(alu->src[0].src);
-   int dst_bit_size = nir_dest_bit_size(alu->dest.dest);
+   int dst_bit_size = alu->dest.dest.ssa.bit_size;
 
    if (src_bit_size == 64 || dst_bit_size == 64) {
       /* Avoid vectorizing 64-bit instructions at all.  Despite tgsi.rst
@@ -3366,7 +3366,7 @@ scalarize_64bit(const nir_instr *instr, const void *data)
 {
    const nir_alu_instr *alu = nir_instr_as_alu(instr);
 
-   return (nir_dest_bit_size(alu->dest.dest) == 64 ||
+   return (alu->dest.dest.ssa.bit_size == 64 ||
            nir_src_bit_size(alu->src[0].src) == 64);
 }
 
@@ -3395,7 +3395,7 @@ nir_to_tgsi_lower_64bit_intrinsic(nir_builder *b, nir_intrinsic_instr *instr)
 
    bool has_dest = nir_intrinsic_infos[instr->intrinsic].has_dest;
    if (has_dest) {
-      if (nir_dest_bit_size(instr->dest) != 64)
+      if (instr->dest.ssa.bit_size != 64)
          return false;
    } else  {
       if (nir_src_bit_size(instr->src[0]) != 64)
@@ -3824,7 +3824,7 @@ ntt_vec_to_mov_writemask_cb(const nir_instr *instr, unsigned writemask, UNUSED c
       return false;
 
    nir_alu_instr *alu = nir_instr_as_alu(instr);
-   int dst_32 = nir_dest_bit_size(alu->dest.dest) == 32;
+   int dst_32 = alu->dest.dest.ssa.bit_size == 32;
    int src_64 = nir_src_bit_size(alu->src[0].src) == 64;
 
    if (src_64 && dst_32) {
index 5b3814c..80f5a58 100644 (file)
@@ -183,9 +183,9 @@ record_loads(nir_builder *b, nir_instr *instr, void *data)
    if (intr->intrinsic != nir_intrinsic_load_preamble)
       return false;
 
-   assert(nir_dest_bit_size(intr->dest) >= 16 && "no 8-bit sysvals");
+   assert(intr->dest.ssa.bit_size >= 16 && "no 8-bit sysvals");
    unsigned dim = nir_dest_num_components(intr->dest);
-   unsigned element_size = nir_dest_bit_size(intr->dest) / 16;
+   unsigned element_size = intr->dest.ssa.bit_size / 16;
    unsigned length = dim * element_size;
 
    struct state *state = data;
index 3ef65dc..15c0590 100644 (file)
@@ -46,7 +46,7 @@ r600_legalize_image_load_store_impl(nir_builder *b,
 
    if (load_value)
       default_value =
-         nir_imm_zero(b, nir_dest_num_components(ir->dest), nir_dest_bit_size(ir->dest));
+         nir_imm_zero(b, nir_dest_num_components(ir->dest), ir->dest.ssa.bit_size);
 
    auto image_exists =
       nir_ult_imm(b, ir->src[0].ssa, b->shader->info.num_images);
index 2cfc911..7ba7878 100644 (file)
@@ -112,7 +112,7 @@ LowerLoad64Uniform::filter(const nir_instr *instr) const
        intr->intrinsic != nir_intrinsic_load_ubo_vec4)
       return false;
 
-   return nir_dest_bit_size(intr->dest) == 64;
+   return intr->dest.ssa.bit_size == 64;
 }
 
 nir_def *
@@ -156,7 +156,7 @@ class LowerSplit64op : public NirLowerInstruction {
          auto alu = nir_instr_as_alu(instr);
          switch (alu->op) {
          case nir_op_bcsel:
-            return nir_dest_bit_size(alu->dest.dest) == 64;
+            return alu->dest.dest.ssa.bit_size == 64;
          case nir_op_f2i32:
          case nir_op_f2u32:
          case nir_op_f2i64:
@@ -285,7 +285,7 @@ LowerSplit64BitVar::filter(const nir_instr *instr) const
       case nir_intrinsic_load_input:
       case nir_intrinsic_load_ubo:
       case nir_intrinsic_load_ssbo:
-         if (nir_dest_bit_size(intr->dest) != 64)
+         if (intr->dest.ssa.bit_size != 64)
             return false;
          return nir_dest_num_components(intr->dest) >= 3;
       case nir_intrinsic_store_output:
@@ -306,7 +306,7 @@ LowerSplit64BitVar::filter(const nir_instr *instr) const
       case nir_op_bcsel:
          if (nir_dest_num_components(alu->dest.dest) < 3)
             return false;
-         return nir_dest_bit_size(alu->dest.dest) == 64;
+         return alu->dest.dest.ssa.bit_size == 64;
       case nir_op_bany_fnequal3:
       case nir_op_bany_fnequal4:
       case nir_op_ball_fequal3:
@@ -833,7 +833,7 @@ Lower64BitToVec2::filter(const nir_instr *instr) const
       case nir_intrinsic_load_global:
       case nir_intrinsic_load_ubo_vec4:
       case nir_intrinsic_load_ssbo:
-         return nir_dest_bit_size(intr->dest) == 64;
+         return intr->dest.ssa.bit_size == 64;
       case nir_intrinsic_store_deref: {
          if (nir_src_bit_size(intr->src[1]) == 64)
             return true;
@@ -850,11 +850,11 @@ Lower64BitToVec2::filter(const nir_instr *instr) const
    }
    case nir_instr_type_alu: {
       auto alu = nir_instr_as_alu(instr);
-      return nir_dest_bit_size(alu->dest.dest) == 64;
+      return alu->dest.dest.ssa.bit_size == 64;
    }
    case nir_instr_type_phi: {
       auto phi = nir_instr_as_phi(instr);
-      return nir_dest_bit_size(phi->dest) == 64;
+      return phi->dest.ssa.bit_size == 64;
    }
    case nir_instr_type_load_const: {
       auto lc = nir_instr_as_load_const(instr);
@@ -1286,7 +1286,7 @@ r600_lower_64bit_intrinsic(nir_builder *b, nir_intrinsic_instr *instr)
 
    bool has_dest = nir_intrinsic_infos[instr->intrinsic].has_dest;
    if (has_dest) {
-      if (nir_dest_bit_size(instr->dest) != 64)
+      if (instr->dest.ssa.bit_size != 64)
          return false;
    } else {
       if (nir_src_bit_size(instr->src[0]) != 64)
index 236dffa..6789780 100644 (file)
@@ -29,7 +29,7 @@ static uint8_t si_vectorize_callback(const nir_instr *instr, const void *data)
       return 0;
 
    nir_alu_instr *alu = nir_instr_as_alu(instr);
-   if (nir_dest_bit_size(alu->dest.dest) == 16) {
+   if (alu->dest.dest.ssa.bit_size == 16) {
       switch (alu->op) {
       case nir_op_unpack_32_2x16_split_x:
       case nir_op_unpack_32_2x16_split_y:
@@ -52,7 +52,7 @@ static unsigned si_lower_bit_size_callback(const nir_instr *instr, void *data)
    switch (alu->op) {
    case nir_op_imul_high:
    case nir_op_umul_high:
-      if (nir_dest_bit_size(alu->dest.dest) < 32)
+      if (alu->dest.dest.ssa.bit_size < 32)
          return 32;
       break;
    default:
index a8a55be..5d0772b 100644 (file)
@@ -2158,7 +2158,7 @@ emit_alu(struct ntv_context *ctx, nir_alu_instr *alu)
       }
    }
 
-   unsigned bit_size = nir_dest_bit_size(alu->dest.dest);
+   unsigned bit_size = alu->dest.dest.ssa.bit_size;
    unsigned num_components = nir_dest_num_components(alu->dest.dest);
    nir_alu_type atype = bit_size == 1 ?
                         nir_type_bool :
@@ -2713,7 +2713,7 @@ emit_load_shared(struct ntv_context *ctx, nir_intrinsic_instr *intr)
 {
    SpvId dest_type = get_def_type(ctx, &intr->dest.ssa, nir_type_uint);
    unsigned num_components = nir_dest_num_components(intr->dest);
-   unsigned bit_size = nir_dest_bit_size(intr->dest);
+   unsigned bit_size = intr->dest.ssa.bit_size;
    SpvId uint_type = get_uvec_type(ctx, bit_size, 1);
    SpvId ptr_type = spirv_builder_type_pointer(&ctx->builder,
                                                SpvStorageClassWorkgroup,
@@ -2775,7 +2775,7 @@ emit_load_scratch(struct ntv_context *ctx, nir_intrinsic_instr *intr)
 {
    SpvId dest_type = get_def_type(ctx, &intr->dest.ssa, nir_type_uint);
    unsigned num_components = nir_dest_num_components(intr->dest);
-   unsigned bit_size = nir_dest_bit_size(intr->dest);
+   unsigned bit_size = intr->dest.ssa.bit_size;
    SpvId uint_type = get_uvec_type(ctx, bit_size, 1);
    SpvId ptr_type = spirv_builder_type_pointer(&ctx->builder,
                                                SpvStorageClassPrivate,
@@ -3047,13 +3047,16 @@ emit_load_vec_input(struct ntv_context *ctx, nir_intrinsic_instr *intr, SpvId *v
       var_type = get_bvec_type(ctx, nir_dest_num_components(intr->dest));
       break;
    case nir_type_int:
-      var_type = get_ivec_type(ctx, nir_dest_bit_size(intr->dest), nir_dest_num_components(intr->dest));
+      var_type = get_ivec_type(ctx, intr->dest.ssa.bit_size,
+                               nir_dest_num_components(intr->dest));
       break;
    case nir_type_uint:
-      var_type = get_uvec_type(ctx, nir_dest_bit_size(intr->dest), nir_dest_num_components(intr->dest));
+      var_type = get_uvec_type(ctx, intr->dest.ssa.bit_size,
+                               nir_dest_num_components(intr->dest));
       break;
    case nir_type_float:
-      var_type = get_fvec_type(ctx, nir_dest_bit_size(intr->dest), nir_dest_num_components(intr->dest));
+      var_type = get_fvec_type(ctx, intr->dest.ssa.bit_size,
+                               nir_dest_num_components(intr->dest));
       break;
    default:
       unreachable("unknown type passed");
@@ -3112,7 +3115,9 @@ static void
 handle_atomic_op(struct ntv_context *ctx, nir_intrinsic_instr *intr, SpvId ptr, SpvId param, SpvId param2, nir_alu_type type)
 {
    SpvId dest_type = get_def_type(ctx, &intr->dest.ssa, type);
-   SpvId result = emit_atomic(ctx, get_atomic_op(ctx, nir_dest_bit_size(intr->dest), nir_intrinsic_atomic_op(intr)), dest_type, ptr, param, param2);
+   SpvId result = emit_atomic(ctx,
+                              get_atomic_op(ctx, intr->dest.ssa.bit_size, nir_intrinsic_atomic_op(intr)),
+                              dest_type, ptr, param, param2);
    assert(result);
    store_def(ctx, &intr->dest.ssa, result, type);
 }
@@ -4175,7 +4180,7 @@ emit_tex(struct ntv_context *ctx, nir_tex_instr *tex)
    if (tex->is_sparse)
       result = extract_sparse_load(ctx, result, actual_dest_type, &tex->dest.ssa);
 
-   if (nir_dest_bit_size(tex->dest) != 32) {
+   if (tex->dest.ssa.bit_size != 32) {
       /* convert FP32 to FP16 */
       result = emit_unop(ctx, SpvOpFConvert, dest_type, result);
    }
index f8d72a1..2bf857c 100644 (file)
@@ -1529,19 +1529,19 @@ bound_bo_access_instr(nir_builder *b, nir_instr *instr, void *data)
 
    switch (intr->intrinsic) {
    case nir_intrinsic_store_ssbo:
-      var = bo->ssbo[nir_dest_bit_size(intr->dest) >> 4];
+      var = bo->ssbo[intr->dest.ssa.bit_size >> 4];
       offset = intr->src[2].ssa;
       is_load = false;
       break;
    case nir_intrinsic_load_ssbo:
-      var = bo->ssbo[nir_dest_bit_size(intr->dest) >> 4];
+      var = bo->ssbo[intr->dest.ssa.bit_size >> 4];
       offset = intr->src[1].ssa;
       break;
    case nir_intrinsic_load_ubo:
       if (nir_src_is_const(intr->src[0]) && nir_src_as_const_value(intr->src[0])->u32 == 0)
-         var = bo->uniforms[nir_dest_bit_size(intr->dest) >> 4];
+         var = bo->uniforms[intr->dest.ssa.bit_size >> 4];
       else
-         var = bo->ubo[nir_dest_bit_size(intr->dest) >> 4];
+         var = bo->ubo[intr->dest.ssa.bit_size >> 4];
       offset = intr->src[1].ssa;
       break;
    default:
@@ -1564,7 +1564,7 @@ bound_bo_access_instr(nir_builder *b, nir_instr *instr, void *data)
       if (offset_bytes + i >= size) {
          rewrites++;
          if (is_load)
-            result[i] = nir_imm_zero(b, 1, nir_dest_bit_size(intr->dest));
+            result[i] = nir_imm_zero(b, 1, intr->dest.ssa.bit_size);
       }
    }
    assert(rewrites == intr->num_components);
@@ -2144,7 +2144,7 @@ rewrite_bo_access_instr(nir_builder *b, nir_instr *instr, void *data)
    case nir_intrinsic_ssbo_atomic:
    case nir_intrinsic_ssbo_atomic_swap: {
       /* convert offset to uintN_t[idx] */
-      nir_def *offset = nir_udiv_imm(b, intr->src[1].ssa, nir_dest_bit_size(intr->dest) / 8);
+      nir_def *offset = nir_udiv_imm(b, intr->src[1].ssa, intr->dest.ssa.bit_size / 8);
       nir_instr_rewrite_src_ssa(instr, &intr->src[1], offset);
       return true;
    }
@@ -2154,10 +2154,10 @@ rewrite_bo_access_instr(nir_builder *b, nir_instr *instr, void *data)
       bool force_2x32 = intr->intrinsic == nir_intrinsic_load_ubo &&
                         nir_src_is_const(intr->src[0]) &&
                         nir_src_as_uint(intr->src[0]) == 0 &&
-                        nir_dest_bit_size(intr->dest) == 64 &&
+                        intr->dest.ssa.bit_size == 64 &&
                         nir_intrinsic_align_offset(intr) % 8 != 0;
-      force_2x32 |= nir_dest_bit_size(intr->dest) == 64 && !has_int64;
-      nir_def *offset = nir_udiv_imm(b, intr->src[1].ssa, (force_2x32 ? 32 : nir_dest_bit_size(intr->dest)) / 8);
+      force_2x32 |= intr->dest.ssa.bit_size == 64 && !has_int64;
+      nir_def *offset = nir_udiv_imm(b, intr->src[1].ssa, (force_2x32 ? 32 : intr->dest.ssa.bit_size) / 8);
       nir_instr_rewrite_src_ssa(instr, &intr->src[1], offset);
       /* if 64bit isn't supported, 64bit loads definitely aren't supported, so rewrite as 2x32 with cast and pray */
       if (force_2x32) {
@@ -2181,8 +2181,8 @@ rewrite_bo_access_instr(nir_builder *b, nir_instr *instr, void *data)
    }
    case nir_intrinsic_load_shared:
       b->cursor = nir_before_instr(instr);
-      bool force_2x32 = nir_dest_bit_size(intr->dest) == 64 && !has_int64;
-      nir_def *offset = nir_udiv_imm(b, intr->src[0].ssa, (force_2x32 ? 32 : nir_dest_bit_size(intr->dest)) / 8);
+      bool force_2x32 = intr->dest.ssa.bit_size == 64 && !has_int64;
+      nir_def *offset = nir_udiv_imm(b, intr->src[0].ssa, (force_2x32 ? 32 : intr->dest.ssa.bit_size) / 8);
       nir_instr_rewrite_src_ssa(instr, &intr->src[0], offset);
       /* if 64bit isn't supported, 64bit loads definitely aren't supported, so rewrite as 2x32 with cast and pray */
       if (force_2x32) {
@@ -2311,7 +2311,8 @@ rewrite_atomic_ssbo_instr(nir_builder *b, nir_instr *instr, struct bo_vars *bo)
       unreachable("unknown intrinsic");
    nir_def *offset = intr->src[1].ssa;
    nir_src *src = &intr->src[0];
-   nir_variable *var = get_bo_var(b->shader, bo, true, src, nir_dest_bit_size(intr->dest));
+   nir_variable *var = get_bo_var(b->shader, bo, true, src,
+                                  intr->dest.ssa.bit_size);
    nir_deref_instr *deref_var = nir_build_deref_var(b, var);
    nir_def *idx = src->ssa;
    if (bo->first_ssbo)
@@ -2326,7 +2327,7 @@ rewrite_atomic_ssbo_instr(nir_builder *b, nir_instr *instr, struct bo_vars *bo)
       nir_deref_instr *deref_arr = nir_build_deref_array(b, deref_struct, offset);
       nir_intrinsic_instr *new_instr = nir_intrinsic_instr_create(b->shader, op);
       nir_def_init(&new_instr->instr, &new_instr->dest.ssa, 1,
-                   nir_dest_bit_size(intr->dest));
+                   intr->dest.ssa.bit_size);
       nir_intrinsic_set_atomic_op(new_instr, nir_intrinsic_atomic_op(intr));
       new_instr->src[0] = nir_src_for_ssa(&deref_arr->dest.ssa);
       /* deref ops have no offset src, so copy the srcs after it */
@@ -2369,12 +2370,12 @@ remove_bo_access_instr(nir_builder *b, nir_instr *instr, void *data)
       break;
    case nir_intrinsic_load_ssbo:
       src = &intr->src[0];
-      var = get_bo_var(b->shader, bo, true, src, nir_dest_bit_size(intr->dest));
+      var = get_bo_var(b->shader, bo, true, src, intr->dest.ssa.bit_size);
       offset = intr->src[1].ssa;
       break;
    case nir_intrinsic_load_ubo:
       src = &intr->src[0];
-      var = get_bo_var(b->shader, bo, false, src, nir_dest_bit_size(intr->dest));
+      var = get_bo_var(b->shader, bo, false, src, intr->dest.ssa.bit_size);
       offset = intr->src[1].ssa;
       ssbo = false;
       break;
@@ -2389,13 +2390,15 @@ remove_bo_access_instr(nir_builder *b, nir_instr *instr, void *data)
       idx = nir_iadd_imm(b, idx, -bo->first_ubo);
    else if (ssbo && bo->first_ssbo)
       idx = nir_iadd_imm(b, idx, -bo->first_ssbo);
-   nir_deref_instr *deref_array = nir_build_deref_array(b, deref_var, nir_i2iN(b, idx, nir_dest_bit_size(deref_var->dest)));
+   nir_deref_instr *deref_array = nir_build_deref_array(b, deref_var,
+                                                        nir_i2iN(b, idx, deref_var->dest.ssa.bit_size));
    nir_deref_instr *deref_struct = nir_build_deref_struct(b, deref_array, 0);
    assert(intr->num_components <= 2);
    if (is_load) {
       nir_def *result[2];
       for (unsigned i = 0; i < intr->num_components; i++) {
-         nir_deref_instr *deref_arr = nir_build_deref_array(b, deref_struct, nir_i2iN(b, offset, nir_dest_bit_size(deref_struct->dest)));
+         nir_deref_instr *deref_arr = nir_build_deref_array(b, deref_struct,
+                                                            nir_i2iN(b, offset, deref_struct->dest.ssa.bit_size));
          result[i] = nir_load_deref(b, deref_arr);
          if (intr->intrinsic == nir_intrinsic_load_ssbo)
             nir_intrinsic_set_access(nir_instr_as_intrinsic(result[i]->parent_instr), nir_intrinsic_access(intr));
@@ -2404,7 +2407,8 @@ remove_bo_access_instr(nir_builder *b, nir_instr *instr, void *data)
       nir_def *load = nir_vec(b, result, intr->num_components);
       nir_def_rewrite_uses(&intr->dest.ssa, load);
    } else {
-      nir_deref_instr *deref_arr = nir_build_deref_array(b, deref_struct, nir_i2iN(b, offset, nir_dest_bit_size(deref_struct->dest)));
+      nir_deref_instr *deref_arr = nir_build_deref_array(b, deref_struct,
+                                                         nir_i2iN(b, offset, deref_struct->dest.ssa.bit_size));
       nir_build_store_deref(b, &deref_arr->dest.ssa, intr->src[0].ssa, BITFIELD_MASK(intr->num_components), nir_intrinsic_access(intr));
    }
    nir_instr_remove(instr);
@@ -2638,7 +2642,8 @@ rewrite_read_as_0(nir_builder *b, nir_instr *instr, void *data)
    if (deref_var != var)
       return false;
    b->cursor = nir_before_instr(instr);
-   nir_def *zero = nir_imm_zero(b, nir_dest_num_components(intr->dest), nir_dest_bit_size(intr->dest));
+   nir_def *zero = nir_imm_zero(b, nir_dest_num_components(intr->dest),
+                                intr->dest.ssa.bit_size);
    if (b->shader->info.stage == MESA_SHADER_FRAGMENT) {
       switch (var->data.location) {
       case VARYING_SLOT_COL0:
@@ -3307,7 +3312,7 @@ rewrite_tex_dest(nir_builder *b, nir_tex_instr *tex, nir_variable *var, struct z
    enum glsl_base_type ret_type = glsl_get_sampler_result_type(type);
    bool is_int = glsl_base_type_is_integer(ret_type);
    unsigned bit_size = glsl_base_type_get_bit_size(ret_type);
-   unsigned dest_size = nir_dest_bit_size(tex->dest);
+   unsigned dest_size = tex->dest.ssa.bit_size;
    b->cursor = nir_after_instr(&tex->instr);
    unsigned num_components = nir_dest_num_components(tex->dest);
    bool rewrite_depth = tex->is_shadow && num_components > 1 && tex->op != nir_texop_tg4 && !tex->is_sparse;
@@ -3408,13 +3413,13 @@ lower_zs_swizzle_tex_instr(nir_builder *b, nir_instr *instr, void *data)
          nir_def *swizzle;
          switch (swizzle_key->swizzle[sampler_id].s[tex->component]) {
          case PIPE_SWIZZLE_0:
-            swizzle = nir_imm_zero(b, 4, nir_dest_bit_size(tex->dest));
+            swizzle = nir_imm_zero(b, 4, tex->dest.ssa.bit_size);
             break;
          case PIPE_SWIZZLE_1:
             if (is_int)
-               swizzle = nir_imm_intN_t(b, 4, nir_dest_bit_size(tex->dest));
+               swizzle = nir_imm_intN_t(b, 4, tex->dest.ssa.bit_size);
             else
-               swizzle = nir_imm_floatN_t(b, 4, nir_dest_bit_size(tex->dest));
+               swizzle = nir_imm_floatN_t(b, 4, tex->dest.ssa.bit_size);
             break;
          default:
             if (!tex->component)
@@ -3429,13 +3434,13 @@ lower_zs_swizzle_tex_instr(nir_builder *b, nir_instr *instr, void *data)
       for (unsigned i = 0; i < ARRAY_SIZE(vec); i++) {
          switch (swizzle_key->swizzle[sampler_id].s[i]) {
          case PIPE_SWIZZLE_0:
-            vec[i] = nir_imm_zero(b, 1, nir_dest_bit_size(tex->dest));
+            vec[i] = nir_imm_zero(b, 1, tex->dest.ssa.bit_size);
             break;
          case PIPE_SWIZZLE_1:
             if (is_int)
-               vec[i] = nir_imm_intN_t(b, 1, nir_dest_bit_size(tex->dest));
+               vec[i] = nir_imm_intN_t(b, 1, tex->dest.ssa.bit_size);
             else
-               vec[i] = nir_imm_floatN_t(b, 1, nir_dest_bit_size(tex->dest));
+               vec[i] = nir_imm_floatN_t(b, 1, tex->dest.ssa.bit_size);
             break;
          default:
             vec[i] = dest->num_components == 1 ? dest : nir_channel(b, dest, i);
index 4a5b266..7cc0749 100644 (file)
@@ -178,7 +178,7 @@ create_array_tex_from_cube_tex(nir_builder *b, nir_tex_instr *tex, nir_def *coor
 
    nir_def_init(&array_tex->instr, &array_tex->dest.ssa,
                 nir_tex_instr_dest_size(array_tex),
-                nir_dest_bit_size(tex->dest));
+                tex->dest.ssa.bit_size);
    nir_builder_instr_insert(b, &array_tex->instr);
    return &array_tex->dest.ssa;
 }
@@ -448,7 +448,7 @@ lower_tex_to_txl(nir_builder *b, nir_tex_instr *tex)
    b->cursor = nir_before_instr(&tex->instr);
    nir_def_init(&txl->instr, &txl->dest.ssa,
                 nir_dest_num_components(tex->dest),
-                nir_dest_bit_size(tex->dest));
+                tex->dest.ssa.bit_size);
    nir_builder_instr_insert(b, &txl->instr);
    nir_def_rewrite_uses(&tex->dest.ssa, &txl->dest.ssa);
    return txl;
index d3d5253..5cbda58 100644 (file)
@@ -165,7 +165,7 @@ clover_lower_nir_instr(nir_builder *b, nir_instr *instr, void *_state)
       }
 
       return nir_u2uN(b, nir_vec(b, loads, state->global_dims),
-                     nir_dest_bit_size(intrinsic->dest));
+                     intrinsic->dest.ssa.bit_size);
    }
    case nir_intrinsic_load_constant_base_ptr: {
       return nir_load_var(b, state->constant_var);
index 10ca33c..a67d736 100644 (file)
@@ -67,7 +67,8 @@ rusticl_lower_intrinsics_instr(
         return nir_load_var(b, state->printf_buf);
     case nir_intrinsic_load_work_dim:
         assert(state->work_dim);
-        return nir_u2uN(b, nir_load_var(b, state->work_dim), nir_dest_bit_size(intrins->dest));
+        return nir_u2uN(b, nir_load_var(b, state->work_dim),
+                        intrins->dest.ssa.bit_size);
     default:
         return NULL;
     }
index e5e1f6d..0427be4 100644 (file)
@@ -644,7 +644,7 @@ fs_visitor::prepare_alu_destination_and_sources(const fs_builder &bld,
 
    result.type = brw_type_for_nir_type(devinfo,
       (nir_alu_type)(nir_op_infos[instr->op].output_type |
-                     nir_dest_bit_size(instr->dest.dest)));
+                     instr->dest.dest.ssa.bit_size));
 
    for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
       op[i] = get_nir_src(instr->src[i].src);
@@ -732,7 +732,7 @@ fs_visitor::try_emit_b2fi_of_inot(const fs_builder &bld,
     * The source restriction is just because I was lazy about generating the
     * constant below.
     */
-   if (nir_dest_bit_size(instr->dest.dest) != 32 ||
+   if (instr->dest.dest.ssa.bit_size != 32 ||
        nir_src_bit_size(inot_instr->src[0].src) != 32)
       return false;
 
@@ -1234,13 +1234,13 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr,
 
    case nir_op_irhadd:
    case nir_op_urhadd:
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
       inst = bld.AVG(result, op[0], op[1]);
       break;
 
    case nir_op_ihadd:
    case nir_op_uhadd: {
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
       fs_reg tmp = bld.vgrf(result.type);
 
       if (devinfo->ver >= 8) {
@@ -1292,7 +1292,7 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr,
       const enum brw_reg_type dword_type =
          ud ? BRW_REGISTER_TYPE_UD : BRW_REGISTER_TYPE_D;
 
-      assert(nir_dest_bit_size(instr->dest.dest) == 32);
+      assert(instr->dest.dest.ssa.bit_size == 32);
 
       /* Before copy propagation there are no immediate values. */
       assert(op[0].file != IMM && op[1].file != IMM);
@@ -1308,14 +1308,14 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr,
    }
 
    case nir_op_imul:
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
       bld.MUL(result, op[0], op[1]);
       break;
 
    case nir_op_imul_high:
    case nir_op_umul_high:
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
-      if (nir_dest_bit_size(instr->dest.dest) == 32) {
+      assert(instr->dest.dest.ssa.bit_size < 64);
+      if (instr->dest.dest.ssa.bit_size == 32) {
          bld.emit(SHADER_OPCODE_MULH, result, op[0], op[1]);
       } else {
          fs_reg tmp = bld.vgrf(brw_reg_type_from_bit_size(32, op[0].type));
@@ -1326,7 +1326,7 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr,
 
    case nir_op_idiv:
    case nir_op_udiv:
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
       bld.emit(SHADER_OPCODE_INT_QUOTIENT, result, op[0], op[1]);
       break;
 
@@ -1342,7 +1342,7 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr,
        * appears that our hardware just does the right thing for signed
        * remainder.
        */
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
       bld.emit(SHADER_OPCODE_INT_REMAINDER, result, op[0], op[1]);
       break;
 
@@ -1458,7 +1458,7 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr,
             result.type =
                brw_type_for_nir_type(devinfo,
                                      (nir_alu_type)(nir_type_int |
-                                                    nir_dest_bit_size(instr->dest.dest)));
+                                                    instr->dest.dest.ssa.bit_size));
             op[0].type =
                brw_type_for_nir_type(devinfo,
                                      (nir_alu_type)(nir_type_int |
@@ -1673,25 +1673,25 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr,
       break;
 
    case nir_op_bitfield_reverse:
-      assert(nir_dest_bit_size(instr->dest.dest) == 32);
+      assert(instr->dest.dest.ssa.bit_size == 32);
       assert(nir_src_bit_size(instr->src[0].src) == 32);
       bld.BFREV(result, op[0]);
       break;
 
    case nir_op_bit_count:
-      assert(nir_dest_bit_size(instr->dest.dest) == 32);
+      assert(instr->dest.dest.ssa.bit_size == 32);
       assert(nir_src_bit_size(instr->src[0].src) < 64);
       bld.CBIT(result, op[0]);
       break;
 
    case nir_op_uclz:
-      assert(nir_dest_bit_size(instr->dest.dest) == 32);
+      assert(instr->dest.dest.ssa.bit_size == 32);
       assert(nir_src_bit_size(instr->src[0].src) == 32);
       bld.LZD(retype(result, BRW_REGISTER_TYPE_UD), op[0]);
       break;
 
    case nir_op_ifind_msb: {
-      assert(nir_dest_bit_size(instr->dest.dest) == 32);
+      assert(instr->dest.dest.ssa.bit_size == 32);
       assert(nir_src_bit_size(instr->src[0].src) == 32);
       assert(devinfo->ver >= 7);
 
@@ -1711,7 +1711,7 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr,
    }
 
    case nir_op_find_lsb:
-      assert(nir_dest_bit_size(instr->dest.dest) == 32);
+      assert(instr->dest.dest.ssa.bit_size == 32);
       assert(nir_src_bit_size(instr->src[0].src) == 32);
       assert(devinfo->ver >= 7);
       bld.FBL(result, op[0]);
@@ -1722,15 +1722,15 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr,
       unreachable("should have been lowered");
    case nir_op_ubfe:
    case nir_op_ibfe:
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
       bld.BFE(result, op[2], op[1], op[0]);
       break;
    case nir_op_bfm:
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
       bld.BFI1(result, op[0], op[1]);
       break;
    case nir_op_bfi:
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
 
       /* bfi is ((...) | (~src0 & src2)). The second part is zero when src2 is
        * either 0 or src0. Replacing the 0 with another value can eliminate a
@@ -1870,7 +1870,7 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr,
        *    There is no direct conversion from B/UB to Q/UQ or Q/UQ to B/UB.
        *    Use two instructions and a word or DWord intermediate integer type.
        */
-      if (nir_dest_bit_size(instr->dest.dest) == 64) {
+      if (instr->dest.dest.ssa.bit_size == 64) {
          const brw_reg_type type = brw_int_type(1, instr->op == nir_op_extract_i8);
 
          if (instr->op == nir_op_extract_i8) {
@@ -2623,7 +2623,7 @@ fs_visitor::nir_emit_vs_intrinsic(const fs_builder &bld,
       unreachable("should be lowered by nir_lower_system_values()");
 
    case nir_intrinsic_load_input: {
-      assert(nir_dest_bit_size(instr->dest) == 32);
+      assert(instr->dest.ssa.bit_size == 32);
       fs_reg src = fs_reg(ATTR, nir_intrinsic_base(instr) * 4, dest.type);
       src = offset(src, bld, nir_intrinsic_component(instr));
       src = offset(src, bld, nir_src_as_uint(instr->src[0]));
@@ -2774,7 +2774,7 @@ fs_visitor::nir_emit_tcs_intrinsic(const fs_builder &bld,
       break;
 
    case nir_intrinsic_load_per_vertex_input: {
-      assert(nir_dest_bit_size(instr->dest) == 32);
+      assert(instr->dest.ssa.bit_size == 32);
       fs_reg indirect_offset = get_indirect_offset(instr);
       unsigned imm_offset = nir_intrinsic_base(instr);
       fs_inst *inst;
@@ -2851,7 +2851,7 @@ fs_visitor::nir_emit_tcs_intrinsic(const fs_builder &bld,
 
    case nir_intrinsic_load_output:
    case nir_intrinsic_load_per_vertex_output: {
-      assert(nir_dest_bit_size(instr->dest) == 32);
+      assert(instr->dest.ssa.bit_size == 32);
       fs_reg indirect_offset = get_indirect_offset(instr);
       unsigned imm_offset = nir_intrinsic_base(instr);
       unsigned first_component = nir_intrinsic_component(instr);
@@ -2994,7 +2994,7 @@ fs_visitor::nir_emit_tes_intrinsic(const fs_builder &bld,
 
    case nir_intrinsic_load_input:
    case nir_intrinsic_load_per_vertex_input: {
-      assert(nir_dest_bit_size(instr->dest) == 32);
+      assert(instr->dest.ssa.bit_size == 32);
       fs_reg indirect_offset = get_indirect_offset(instr);
       unsigned imm_offset = nir_intrinsic_base(instr);
       unsigned first_component = nir_intrinsic_component(instr);
@@ -3481,7 +3481,7 @@ fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld,
       /* In Fragment Shaders load_input is used either for flat inputs or
        * per-primitive inputs.
        */
-      assert(nir_dest_bit_size(instr->dest) == 32);
+      assert(instr->dest.ssa.bit_size == 32);
       unsigned base = nir_intrinsic_base(instr);
       unsigned comp = nir_intrinsic_component(instr);
       unsigned num_components = instr->num_components;
@@ -3732,7 +3732,7 @@ fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld,
    }
 
    case nir_intrinsic_load_num_workgroups: {
-      assert(nir_dest_bit_size(instr->dest) == 32);
+      assert(instr->dest.ssa.bit_size == 32);
 
       cs_prog_data->uses_num_work_groups = true;
 
@@ -3758,7 +3758,7 @@ fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld,
    case nir_intrinsic_load_shared: {
       assert(devinfo->ver >= 7);
 
-      const unsigned bit_size = nir_dest_bit_size(instr->dest);
+      const unsigned bit_size = instr->dest.ssa.bit_size;
       fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
       srcs[SURFACE_LOGICAL_SRC_SURFACE] = brw_imm_ud(GFX7_BTI_SLM);
 
@@ -4889,7 +4889,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
                VARYING_PULL_CONSTANT_LOAD(bld, offset(dest, bld, i),
                                           surface, surface_handle,
                                           base_offset, i * type_sz(dest.type),
-                                          nir_dest_bit_size(instr->dest) / 8);
+                                          instr->dest.ssa.bit_size / 8);
 
             prog_data->has_ubo_pull = true;
          } else {
@@ -5021,7 +5021,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
    case nir_intrinsic_load_global_constant: {
       assert(devinfo->ver >= 8);
 
-      assert(nir_dest_bit_size(instr->dest) <= 32);
+      assert(instr->dest.ssa.bit_size <= 32);
       assert(nir_intrinsic_align(instr) > 0);
       fs_reg srcs[A64_LOGICAL_NUM_SRCS];
       srcs[A64_LOGICAL_ADDRESS] = get_nir_src(instr->src[0]);
@@ -5029,7 +5029,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
       srcs[A64_LOGICAL_ENABLE_HELPERS] =
          brw_imm_ud(nir_intrinsic_access(instr) & ACCESS_INCLUDE_HELPERS);
 
-      if (nir_dest_bit_size(instr->dest) == 32 &&
+      if (instr->dest.ssa.bit_size == 32 &&
           nir_intrinsic_align(instr) >= 4) {
          assert(nir_dest_num_components(instr->dest) <= 4);
 
@@ -5041,7 +5041,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
          inst->size_written = instr->num_components *
                               inst->dst.component_size(inst->exec_size);
       } else {
-         const unsigned bit_size = nir_dest_bit_size(instr->dest);
+         const unsigned bit_size = instr->dest.ssa.bit_size;
          assert(nir_dest_num_components(instr->dest) == 1);
          fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD);
 
@@ -5099,7 +5099,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
       break;
 
    case nir_intrinsic_load_global_const_block_intel: {
-      assert(nir_dest_bit_size(instr->dest) == 32);
+      assert(instr->dest.ssa.bit_size == 32);
       assert(instr->num_components == 8 || instr->num_components == 16);
 
       const fs_builder ubld = bld.exec_all().group(instr->num_components, 0);
@@ -5204,7 +5204,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
    case nir_intrinsic_load_ssbo: {
       assert(devinfo->ver >= 7);
 
-      const unsigned bit_size = nir_dest_bit_size(instr->dest);
+      const unsigned bit_size = instr->dest.ssa.bit_size;
       fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
       srcs[get_nir_src_bindless(instr->src[0]) ?
            SURFACE_LOGICAL_SRC_SURFACE_HANDLE :
@@ -5432,7 +5432,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
       assert(devinfo->ver >= 7);
 
       assert(nir_dest_num_components(instr->dest) == 1);
-      const unsigned bit_size = nir_dest_bit_size(instr->dest);
+      const unsigned bit_size = instr->dest.ssa.bit_size;
       fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
 
       if (devinfo->verx10 >= 125) {
@@ -5712,7 +5712,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
       bld.exec_all().group(1, 0).MOV(flag, brw_imm_ud(0u));
       bld.CMP(bld.null_reg_ud(), value, brw_imm_ud(0u), BRW_CONDITIONAL_NZ);
 
-      if (nir_dest_bit_size(instr->dest) > 32) {
+      if (instr->dest.ssa.bit_size > 32) {
          dest.type = BRW_REGISTER_TYPE_UQ;
       } else {
          dest.type = BRW_REGISTER_TYPE_UD;
@@ -5947,7 +5947,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
    }
 
    case nir_intrinsic_load_global_block_intel: {
-      assert(nir_dest_bit_size(instr->dest) == 32);
+      assert(instr->dest.ssa.bit_size == 32);
 
       fs_reg address = bld.emit_uniformize(get_nir_src(instr->src[0]));
 
@@ -6021,7 +6021,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
 
    case nir_intrinsic_load_shared_block_intel:
    case nir_intrinsic_load_ssbo_block_intel: {
-      assert(nir_dest_bit_size(instr->dest) == 32);
+      assert(instr->dest.ssa.bit_size == 32);
 
       const bool is_ssbo =
          instr->intrinsic == nir_intrinsic_load_ssbo_block_intel;
@@ -6287,9 +6287,9 @@ fs_visitor::nir_emit_surface_atomic(const fs_builder &bld,
     *
     * 16-bit float atomics are supported, however.
     */
-   assert(nir_dest_bit_size(instr->dest) == 32 ||
-          (nir_dest_bit_size(instr->dest) == 64 && devinfo->has_lsc) ||
-          (nir_dest_bit_size(instr->dest) == 16 &&
+   assert(instr->dest.ssa.bit_size == 32 ||
+          (instr->dest.ssa.bit_size == 64 && devinfo->has_lsc) ||
+          (instr->dest.ssa.bit_size == 16 &&
            (devinfo->has_lsc || lsc_opcode_is_atomic_float(op))));
 
    fs_reg dest = get_nir_def(instr->dest.ssa);
@@ -6336,7 +6336,7 @@ fs_visitor::nir_emit_surface_atomic(const fs_builder &bld,
 
    /* Emit the actual atomic operation */
 
-   switch (nir_dest_bit_size(instr->dest)) {
+   switch (instr->dest.ssa.bit_size) {
       case 16: {
          fs_reg dest32 = bld.vgrf(BRW_REGISTER_TYPE_UD);
          bld.emit(SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL,
@@ -6387,7 +6387,7 @@ fs_visitor::nir_emit_global_atomic(const fs_builder &bld,
    srcs[A64_LOGICAL_ARG] = brw_imm_ud(op);
    srcs[A64_LOGICAL_ENABLE_HELPERS] = brw_imm_ud(0);
 
-   switch (nir_dest_bit_size(instr->dest)) {
+   switch (instr->dest.ssa.bit_size) {
    case 16: {
       fs_reg dest32 = bld.vgrf(BRW_REGISTER_TYPE_UD);
       bld.emit(SHADER_OPCODE_A64_UNTYPED_ATOMIC_LOGICAL,
index 1ce1e7d..b4c6eb0 100644 (file)
@@ -54,7 +54,7 @@ brw_nir_lower_load_uniforms_impl(nir_builder *b, nir_instr *instr,
 
    /* Read the first few 32-bit scalars from InlineData. */
    if (nir_src_is_const(intrin->src[0]) &&
-       nir_dest_bit_size(intrin->dest) == 32 &&
+       intrin->dest.ssa.bit_size == 32 &&
        nir_dest_num_components(intrin->dest) == 1) {
       unsigned off = nir_intrinsic_base(intrin) + nir_src_as_uint(intrin->src[0]);
       unsigned off_dw = off / 4;
@@ -1777,7 +1777,7 @@ static void
 emit_urb_direct_reads(const fs_builder &bld, nir_intrinsic_instr *instr,
                       const fs_reg &dest, fs_reg urb_handle)
 {
-   assert(nir_dest_bit_size(instr->dest) == 32);
+   assert(instr->dest.ssa.bit_size == 32);
 
    unsigned comps = nir_dest_num_components(instr->dest);
    if (comps == 0)
@@ -1819,7 +1819,7 @@ static void
 emit_urb_indirect_reads(const fs_builder &bld, nir_intrinsic_instr *instr,
                         const fs_reg &dest, const fs_reg &offset_src, fs_reg urb_handle)
 {
-   assert(nir_dest_bit_size(instr->dest) == 32);
+   assert(instr->dest.ssa.bit_size == 32);
 
    unsigned comps = nir_dest_num_components(instr->dest);
    if (comps == 0)
index 30a5b12..5715c8c 100644 (file)
@@ -158,7 +158,7 @@ analyze_ubos_block(struct ubo_analysis_state *state, nir_block *block)
 
          /* The value might span multiple 32-byte chunks. */
          const int bytes = nir_intrinsic_dest_components(intrin) *
-                           (nir_dest_bit_size(intrin->dest) / 8);
+                           (intrin->dest.ssa.bit_size / 8);
          const int start = ROUND_DOWN_TO(byte_offset, 32);
          const int end = ALIGN(byte_offset + bytes, 32);
          const int chunks = (end - start) / 32;
index d5cbfbc..5e9549f 100644 (file)
@@ -52,7 +52,7 @@ brw_nir_blockify_uniform_loads_instr(nir_builder *b,
       if (nir_src_is_divergent(intrin->src[1]))
          return false;
 
-      if (nir_dest_bit_size(intrin->dest) != 32)
+      if (intrin->dest.ssa.bit_size != 32)
          return false;
 
       /* Without the LSC, we can only do block loads of at least 4dwords (1
@@ -75,7 +75,7 @@ brw_nir_blockify_uniform_loads_instr(nir_builder *b,
       if (nir_src_is_divergent(intrin->src[0]))
          return false;
 
-      if (nir_dest_bit_size(intrin->dest) != 32)
+      if (intrin->dest.ssa.bit_size != 32)
          return false;
 
       intrin->intrinsic = nir_intrinsic_load_shared_uniform_block_intel;
@@ -85,7 +85,7 @@ brw_nir_blockify_uniform_loads_instr(nir_builder *b,
       if (nir_src_is_divergent(intrin->src[0]))
          return false;
 
-      if (nir_dest_bit_size(intrin->dest) != 32)
+      if (intrin->dest.ssa.bit_size != 32)
          return false;
 
       /* Without the LSC, we can only do block loads of at least 4dwords (1
index 9133410..6e30c86 100644 (file)
@@ -57,7 +57,7 @@ lower_alu_instr(nir_builder *b, nir_alu_instr *alu)
    nir_alu_type src_type = nir_op_infos[alu->op].input_types[0];
    nir_alu_type src_full_type = (nir_alu_type) (src_type | src_bit_size);
 
-   unsigned dst_bit_size = nir_dest_bit_size(alu->dest.dest);
+   unsigned dst_bit_size = alu->dest.dest.ssa.bit_size;
    nir_alu_type dst_full_type = nir_op_infos[alu->op].output_type;
    nir_alu_type dst_type = nir_alu_type_get_base_type(dst_full_type);
 
index 285d5cd..bf1c239 100644 (file)
@@ -82,7 +82,7 @@ lower_sparse_image_load(nir_builder *b, nir_intrinsic_instr *intrin)
    if (intrin->intrinsic == nir_intrinsic_image_sparse_load) {
       img_load = nir_image_load(b,
                                 intrin->num_components - 1,
-                                nir_dest_bit_size(intrin->dest),
+                                intrin->dest.ssa.bit_size,
                                 intrin->src[0].ssa,
                                 intrin->src[1].ssa,
                                 intrin->src[2].ssa,
@@ -92,7 +92,7 @@ lower_sparse_image_load(nir_builder *b, nir_intrinsic_instr *intrin)
    } else {
       img_load = nir_bindless_image_load(b,
                                          intrin->num_components - 1,
-                                         nir_dest_bit_size(intrin->dest),
+                                         intrin->dest.ssa.bit_size,
                                          intrin->src[0].ssa,
                                          intrin->src[1].ssa,
                                          intrin->src[2].ssa,
@@ -157,7 +157,7 @@ lower_sparse_image_load(nir_builder *b, nir_intrinsic_instr *intrin)
    tex->src[2].src = nir_src_for_ssa(nir_imm_int(b, 0));
 
    nir_def_init(&tex->instr, &tex->dest.ssa, 5,
-                nir_dest_bit_size(intrin->dest));
+                intrin->dest.ssa.bit_size);
 
    nir_builder_instr_insert(b, &tex->instr);
 
index 8ed94cb..8f7a8b6 100644 (file)
@@ -33,7 +33,7 @@ vec4_gs_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
 
    switch (instr->intrinsic) {
    case nir_intrinsic_load_per_vertex_input: {
-      assert(nir_dest_bit_size(instr->dest) == 32);
+      assert(instr->dest.ssa.bit_size == 32);
       /* The EmitNoIndirectInput flag guarantees our vertex index will
        * be constant.  We should handle indirects someday.
        */
index fdf71e5..b6cf5b4 100644 (file)
@@ -423,7 +423,7 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
       break;
 
    case nir_intrinsic_load_input: {
-      assert(nir_dest_bit_size(instr->dest) == 32);
+      assert(instr->dest.ssa.bit_size == 32);
       /* We set EmitNoIndirectInput for VS */
       unsigned load_offset = nir_src_as_uint(instr->src[0]);
 
@@ -541,7 +541,7 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
       assert(devinfo->ver == 7);
 
       /* brw_nir_lower_mem_access_bit_sizes takes care of this */
-      assert(nir_dest_bit_size(instr->dest) == 32);
+      assert(instr->dest.ssa.bit_size == 32);
 
       src_reg surf_index = get_nir_ssbo_intrinsic_index(instr);
       src_reg offset_reg = retype(get_nir_src_imm(instr->src[1]),
@@ -685,7 +685,7 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
       src_reg packed_consts;
       if (push_reg.file != BAD_FILE) {
          packed_consts = push_reg;
-      } else if (nir_dest_bit_size(instr->dest) == 32) {
+      } else if (instr->dest.ssa.bit_size == 32) {
          packed_consts = src_reg(this, glsl_type::vec4_type);
          emit_pull_constant_load_reg(dst_reg(packed_consts),
                                      surf_index,
@@ -1082,7 +1082,7 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
    vec4_instruction *inst;
 
    nir_alu_type dst_type = (nir_alu_type) (nir_op_infos[instr->op].output_type |
-                                           nir_dest_bit_size(instr->dest.dest));
+                                           instr->dest.dest.ssa.bit_size);
    dst_reg dst = get_nir_def(instr->dest.dest.ssa, dst_type);
    dst.writemask &= nir_component_mask(nir_dest_num_components(instr->dest.dest));
 
@@ -1153,7 +1153,7 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
       break;
 
    case nir_op_iadd:
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
       FALLTHROUGH;
    case nir_op_fadd:
       try_immediate_source(instr, op, true);
@@ -1161,7 +1161,7 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
       break;
 
    case nir_op_uadd_sat:
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
       inst = emit(ADD(dst, op[0], op[1]));
       inst->saturate = true;
       break;
@@ -1172,7 +1172,7 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
       break;
 
    case nir_op_imul: {
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
 
       /* For integer multiplication, the MUL uses the low 16 bits of one of
        * the operands (src0 through SNB, src1 on IVB and later). The MACH
@@ -1206,7 +1206,7 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
 
    case nir_op_imul_high:
    case nir_op_umul_high: {
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
       struct brw_reg acc = retype(brw_acc_reg(8), dst.type);
 
       emit(MUL(acc, op[0], op[1]));
@@ -1236,7 +1236,7 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
 
    case nir_op_idiv:
    case nir_op_udiv:
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
       emit_math(SHADER_OPCODE_INT_QUOTIENT, dst, op[0], op[1]);
       break;
 
@@ -1246,7 +1246,7 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
        * appears that our hardware just does the right thing for signed
        * remainder.
        */
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
       emit_math(SHADER_OPCODE_INT_REMAINDER, dst, op[0], op[1]);
       break;
 
@@ -1297,7 +1297,7 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
       break;
 
    case nir_op_uadd_carry: {
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
       struct brw_reg acc = retype(brw_acc_reg(8), BRW_REGISTER_TYPE_UD);
 
       emit(ADDC(dst_null_ud(), op[0], op[1]));
@@ -1306,7 +1306,7 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
    }
 
    case nir_op_usub_borrow: {
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
       struct brw_reg acc = retype(brw_acc_reg(8), BRW_REGISTER_TYPE_UD);
 
       emit(SUBB(dst_null_ud(), op[0], op[1]));
@@ -1379,7 +1379,7 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
 
    case nir_op_imin:
    case nir_op_umin:
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
       FALLTHROUGH;
    case nir_op_fmin:
       try_immediate_source(instr, op, true);
@@ -1388,7 +1388,7 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
 
    case nir_op_imax:
    case nir_op_umax:
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
       FALLTHROUGH;
    case nir_op_fmax:
       try_immediate_source(instr, op, true);
@@ -1409,7 +1409,7 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
    case nir_op_uge32:
    case nir_op_ieq32:
    case nir_op_ine32:
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
       FALLTHROUGH;
    case nir_op_flt32:
    case nir_op_fge32:
@@ -1444,7 +1444,7 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
    case nir_op_b32all_iequal2:
    case nir_op_b32all_iequal3:
    case nir_op_b32all_iequal4:
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
       FALLTHROUGH;
    case nir_op_b32all_fequal2:
    case nir_op_b32all_fequal3:
@@ -1463,7 +1463,7 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
    case nir_op_b32any_inequal2:
    case nir_op_b32any_inequal3:
    case nir_op_b32any_inequal4:
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
       FALLTHROUGH;
    case nir_op_b32any_fnequal2:
    case nir_op_b32any_fnequal3:
@@ -1481,24 +1481,24 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
    }
 
    case nir_op_inot:
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
       emit(NOT(dst, op[0]));
       break;
 
    case nir_op_ixor:
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
       try_immediate_source(instr, op, true);
       emit(XOR(dst, op[0], op[1]));
       break;
 
    case nir_op_ior:
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
       try_immediate_source(instr, op, true);
       emit(OR(dst, op[0], op[1]));
       break;
 
    case nir_op_iand:
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
       try_immediate_source(instr, op, true);
       emit(AND(dst, op[0], op[1]));
       break;
@@ -1506,7 +1506,7 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
    case nir_op_b2i32:
    case nir_op_b2f32:
    case nir_op_b2f64:
-      if (nir_dest_bit_size(instr->dest.dest) > 32) {
+      if (instr->dest.dest.ssa.bit_size > 32) {
          assert(dst.type == BRW_REGISTER_TYPE_DF);
          emit_conversion_to_double(dst, negate(op[0]));
       } else {
@@ -1584,39 +1584,39 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
       break;
 
    case nir_op_unpack_unorm_4x8:
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
       emit_unpack_unorm_4x8(dst, op[0]);
       break;
 
    case nir_op_pack_unorm_4x8:
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
       emit_pack_unorm_4x8(dst, op[0]);
       break;
 
    case nir_op_unpack_snorm_4x8:
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
       emit_unpack_snorm_4x8(dst, op[0]);
       break;
 
    case nir_op_pack_snorm_4x8:
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
       emit_pack_snorm_4x8(dst, op[0]);
       break;
 
    case nir_op_bitfield_reverse:
-      assert(nir_dest_bit_size(instr->dest.dest) == 32);
+      assert(instr->dest.dest.ssa.bit_size == 32);
       assert(nir_src_bit_size(instr->src[0].src) == 32);
       emit(BFREV(dst, op[0]));
       break;
 
    case nir_op_bit_count:
-      assert(nir_dest_bit_size(instr->dest.dest) == 32);
+      assert(instr->dest.dest.ssa.bit_size == 32);
       assert(nir_src_bit_size(instr->src[0].src) < 64);
       emit(CBIT(dst, op[0]));
       break;
 
    case nir_op_ifind_msb: {
-      assert(nir_dest_bit_size(instr->dest.dest) == 32);
+      assert(instr->dest.dest.ssa.bit_size == 32);
       assert(nir_src_bit_size(instr->src[0].src) == 32);
       assert(devinfo->ver >= 7);
 
@@ -1639,13 +1639,13 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
    }
 
    case nir_op_uclz:
-      assert(nir_dest_bit_size(instr->dest.dest) == 32);
+      assert(instr->dest.dest.ssa.bit_size == 32);
       assert(nir_src_bit_size(instr->src[0].src) == 32);
       emit(LZD(dst, op[0]));
       break;
 
    case nir_op_find_lsb:
-      assert(nir_dest_bit_size(instr->dest.dest) == 32);
+      assert(instr->dest.dest.ssa.bit_size == 32);
       assert(nir_src_bit_size(instr->src[0].src) == 32);
       assert(devinfo->ver >= 7);
       emit(FBL(dst, op[0]));
@@ -1656,7 +1656,7 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
       unreachable("should have been lowered");
    case nir_op_ubfe:
    case nir_op_ibfe:
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
       op[0] = fix_3src_operand(op[0]);
       op[1] = fix_3src_operand(op[1]);
       op[2] = fix_3src_operand(op[2]);
@@ -1665,12 +1665,12 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
       break;
 
    case nir_op_bfm:
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
       emit(BFI1(dst, op[0], op[1]));
       break;
 
    case nir_op_bfi:
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
       op[0] = fix_3src_operand(op[0]);
       op[1] = fix_3src_operand(op[1]);
       op[2] = fix_3src_operand(op[2]);
@@ -1733,19 +1733,19 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
       break;
 
    case nir_op_ishl:
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
       try_immediate_source(instr, op, false);
       emit(SHL(dst, op[0], op[1]));
       break;
 
    case nir_op_ishr:
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
       try_immediate_source(instr, op, false);
       emit(ASR(dst, op[0], op[1]));
       break;
 
    case nir_op_ushr:
-      assert(nir_dest_bit_size(instr->dest.dest) < 64);
+      assert(instr->dest.dest.ssa.bit_size < 64);
       try_immediate_source(instr, op, false);
       emit(SHR(dst, op[0], op[1]));
       break;
index b132611..aa3fe85 100644 (file)
@@ -253,7 +253,7 @@ vec4_tcs_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
                brw_imm_d(key->input_vertices)));
       break;
    case nir_intrinsic_load_per_vertex_input: {
-      assert(nir_dest_bit_size(instr->dest) == 32);
+      assert(instr->dest.ssa.bit_size == 32);
       src_reg indirect_offset = get_indirect_offset(instr);
       unsigned imm_offset = nir_intrinsic_base(instr);
 
index 6a2d03f..ef2bea7 100644 (file)
@@ -149,7 +149,7 @@ vec4_tes_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
 
    case nir_intrinsic_load_input:
    case nir_intrinsic_load_per_vertex_input: {
-      assert(nir_dest_bit_size(instr->dest) == 32);
+      assert(instr->dest.ssa.bit_size == 32);
       src_reg indirect_offset = get_indirect_offset(instr);
       unsigned imm_offset = instr->const_index[0];
       src_reg header = input_read_header;
index 7958ad2..bbee1d6 100644 (file)
@@ -1158,7 +1158,7 @@ try_lower_direct_buffer_intrinsic(nir_builder *b,
       /* 64-bit atomics only support A64 messages so we can't lower them to
        * the index+offset model.
        */
-      if (is_atomic && nir_dest_bit_size(intrin->dest) == 64 &&
+      if (is_atomic && intrin->dest.ssa.bit_size == 64 &&
           !state->pdevice->info.has_lsc)
          return false;
 
index cd5efa1..e58e369 100644 (file)
@@ -211,7 +211,7 @@ anv_nir_push_desc_ubo_fully_promoted(nir_shader *nir,
                /* Check if the load was promoted to a push constant. */
                const unsigned load_offset = const_load_offset[0].u32;
                const int load_bytes = nir_intrinsic_dest_components(intrin) *
-                  (nir_dest_bit_size(intrin->dest) / 8);
+                  (intrin->dest.ssa.bit_size / 8);
 
                for (unsigned i = 0; i < ARRAY_SIZE(bind_map->push_ranges); i++) {
                   if (bind_map->push_ranges[i].set == binding->set &&
index 15ad6e1..8279888 100644 (file)
@@ -152,7 +152,7 @@ create_plane_tex_instr_implicit(struct ycbcr_state *state,
    tex->is_array = old_tex->is_array;
 
    nir_def_init(&tex->instr, &tex->dest.ssa, old_tex->dest.ssa.num_components,
-                nir_dest_bit_size(old_tex->dest));
+                old_tex->dest.ssa.bit_size);
    nir_builder_instr_insert(b, &tex->instr);
 
    return &tex->dest.ssa;
index 041c724..1155344 100644 (file)
@@ -577,7 +577,7 @@ static bool shader_has_double(nir_shader *nir)
              const nir_op_info *info = &nir_op_infos[alu->op];
 
              if (info->output_type & nir_type_float &&
-                 nir_dest_bit_size(alu->dest.dest) == 64)
+                 alu->dest.dest.ssa.bit_size == 64)
                  return true;
          }
       }
index 3324379..93775d3 100644 (file)
@@ -36,13 +36,13 @@ load_ubo(nir_builder *b, nir_intrinsic_instr *intr, nir_variable *var, unsigned
 {
    return nir_load_ubo(b,
                        nir_dest_num_components(intr->dest),
-                       nir_dest_bit_size(intr->dest),
+                       intr->dest.ssa.bit_size,
                        nir_imm_int(b, var->data.binding),
                        nir_imm_int(b, offset),
                        .align_mul = 256,
                        .align_offset = offset,
                        .range_base = offset,
-                       .range = nir_dest_bit_size(intr->dest) * nir_dest_num_components(intr->dest) / 8);
+                       .range = intr->dest.ssa.bit_size * nir_dest_num_components(intr->dest) / 8);
 }
 
 static bool
@@ -146,7 +146,7 @@ lower_load_kernel_input(nir_builder *b, nir_intrinsic_instr *intr,
 {
    b->cursor = nir_before_instr(&intr->instr);
 
-   unsigned bit_size = nir_dest_bit_size(intr->dest);
+   unsigned bit_size = intr->dest.ssa.bit_size;
    enum glsl_base_type base_type;
 
    switch (bit_size) {
index e1fe77e..606dab0 100644 (file)
@@ -70,7 +70,7 @@ load_comps_to_vec(nir_builder *b, unsigned src_bit_size,
 static bool
 lower_32b_offset_load(nir_builder *b, nir_intrinsic_instr *intr, nir_variable *var)
 {
-   unsigned bit_size = nir_dest_bit_size(intr->dest);
+   unsigned bit_size = intr->dest.ssa.bit_size;
    unsigned num_components = nir_dest_num_components(intr->dest);
    unsigned num_bits = num_components * bit_size;
 
index fc87ba9..b141252 100644 (file)
@@ -2318,7 +2318,7 @@ emit_cmp(struct ntd_context *ctx, nir_alu_instr *alu,
 static enum dxil_cast_opcode
 get_cast_op(nir_alu_instr *alu)
 {
-   unsigned dst_bits = nir_dest_bit_size(alu->dest.dest);
+   unsigned dst_bits = alu->dest.dest.ssa.bit_size;
    unsigned src_bits = nir_src_bit_size(alu->src[0].src);
 
    switch (alu->op) {
@@ -2399,7 +2399,7 @@ get_cast_op(nir_alu_instr *alu)
 static const struct dxil_type *
 get_cast_dest_type(struct ntd_context *ctx, nir_alu_instr *alu)
 {
-   unsigned dst_bits = nir_dest_bit_size(alu->dest.dest);
+   unsigned dst_bits = alu->dest.dest.ssa.bit_size;
    switch (nir_alu_type_get_base_type(nir_op_infos[alu->op].output_type)) {
    case nir_type_bool:
       assert(dst_bits == 1);
@@ -2436,7 +2436,7 @@ emit_cast(struct ntd_context *ctx, nir_alu_instr *alu,
    switch (opcode) {
    case DXIL_CAST_UITOFP:
    case DXIL_CAST_SITOFP:
-      if (is_double(info->output_type, nir_dest_bit_size(alu->dest.dest)))
+      if (is_double(info->output_type, alu->dest.dest.ssa.bit_size))
          ctx->mod.feats.dx11_1_double_extensions = true;
       break;
    case DXIL_CAST_FPTOUI:
@@ -2448,7 +2448,7 @@ emit_cast(struct ntd_context *ctx, nir_alu_instr *alu,
       break;
    }
 
-   if (nir_dest_bit_size(alu->dest.dest) == 16) {
+   if (alu->dest.dest.ssa.bit_size == 16) {
       switch (alu->op) {
       case nir_op_f2fmp:
       case nir_op_i2imp:
@@ -2540,7 +2540,7 @@ emit_binary_intin(struct ntd_context *ctx, nir_alu_instr *alu,
    const nir_op_info *info = &nir_op_infos[alu->op];
    assert(info->output_type == info->input_types[0]);
    assert(info->output_type == info->input_types[1]);
-   unsigned dst_bits = nir_dest_bit_size(alu->dest.dest);
+   unsigned dst_bits = alu->dest.dest.ssa.bit_size;
    assert(nir_src_bit_size(alu->src[0].src) == dst_bits);
    assert(nir_src_bit_size(alu->src[1].src) == dst_bits);
    enum overload_type overload = get_overload(info->output_type, dst_bits);
@@ -2561,7 +2561,7 @@ emit_tertiary_intin(struct ntd_context *ctx, nir_alu_instr *alu,
                     const struct dxil_value *op2)
 {
    const nir_op_info *info = &nir_op_infos[alu->op];
-   unsigned dst_bits = nir_dest_bit_size(alu->dest.dest);
+   unsigned dst_bits = alu->dest.dest.ssa.bit_size;
    assert(nir_src_bit_size(alu->src[0].src) == dst_bits);
    assert(nir_src_bit_size(alu->src[1].src) == dst_bits);
    assert(nir_src_bit_size(alu->src[2].src) == dst_bits);
@@ -2900,7 +2900,8 @@ emit_alu(struct ntd_context *ctx, nir_alu_instr *alu)
          /* It's illegal to emit a literal divide by 0 in DXIL */
          nir_scalar divisor = nir_scalar_chase_alu_src(nir_get_ssa_scalar(&alu->dest.dest.ssa, 0), 1);
          if (nir_scalar_as_int(divisor) == 0) {
-            store_alu_dest(ctx, alu, 0, dxil_module_get_int_const(&ctx->mod, 0, nir_dest_bit_size(alu->dest.dest)));
+            store_alu_dest(ctx, alu, 0,
+                           dxil_module_get_int_const(&ctx->mod, 0, alu->dest.dest.ssa.bit_size));
             return true;
          }
       }
@@ -3440,7 +3441,7 @@ emit_load_ssbo(struct ntd_context *ctx, nir_intrinsic_instr *intr)
       emit_raw_bufferload_call(ctx, handle, coord,
                                overload,
                                nir_intrinsic_dest_components(intr),
-                               nir_dest_bit_size(intr->dest) / 8) :
+                               intr->dest.ssa.bit_size / 8) :
       emit_bufferload_call(ctx, handle, coord, overload);
    if (!load)
       return false;
@@ -3452,7 +3453,7 @@ emit_load_ssbo(struct ntd_context *ctx, nir_intrinsic_instr *intr)
          return false;
       store_def(ctx, &intr->dest.ssa, i, val);
    }
-   if (nir_dest_bit_size(intr->dest) == 16)
+   if (intr->dest.ssa.bit_size == 16)
       ctx->mod.feats.native_low_precision = true;
    return true;
 }
@@ -3530,7 +3531,7 @@ emit_load_ubo_vec4(struct ntd_context *ctx, nir_intrinsic_instr *intr)
       store_def(ctx, &intr->dest.ssa, i,
                  dxil_emit_extractval(&ctx->mod, agg, i + first_component));
 
-   if (nir_dest_bit_size(intr->dest) == 16)
+   if (intr->dest.ssa.bit_size == 16)
       ctx->mod.feats.native_low_precision = true;
    return true;
 }
@@ -3899,7 +3900,7 @@ emit_load_deref(struct ntd_context *ctx, nir_intrinsic_instr *intr)
       return false;
 
    const struct dxil_value *retval =
-      dxil_emit_load(&ctx->mod, ptr, nir_dest_bit_size(intr->dest) / 8, false);
+      dxil_emit_load(&ctx->mod, ptr, intr->dest.ssa.bit_size / 8, false);
    if (!retval)
       return false;
 
@@ -4148,7 +4149,7 @@ emit_image_load(struct ntd_context *ctx, nir_intrinsic_instr *intr)
    if (!load_result)
       return false;
 
-   assert(nir_dest_bit_size(intr->dest) == 32);
+   assert(intr->dest.ssa.bit_size == 32);
    unsigned num_components = nir_dest_num_components(intr->dest);
    assert(num_components <= 4);
    for (unsigned i = 0; i < num_components; ++i) {
index b824b87..7fde987 100644 (file)
@@ -250,13 +250,13 @@ lower_shader_system_values(struct nir_builder *builder, nir_instr *instr,
    nir_def *load_data = nir_load_ubo(
       builder, 
       nir_dest_num_components(intrin->dest),
-      nir_dest_bit_size(intrin->dest),
+      intrin->dest.ssa.bit_size,
       nir_channel(builder, load_desc, 0),
       nir_imm_int(builder, offset),
       .align_mul = 256,
       .align_offset = offset,
       .range_base = offset,
-      .range = nir_dest_bit_size(intrin->dest) * nir_dest_num_components(intrin->dest) / 8);
+      .range = intrin->dest.ssa.bit_size * nir_dest_num_components(intrin->dest) / 8);
 
    nir_def_rewrite_uses(&intrin->dest.ssa, load_data);
    nir_instr_remove(instr);
@@ -339,7 +339,7 @@ lower_load_push_constant(struct nir_builder *builder, nir_instr *instr,
    nir_def *load_data = nir_load_ubo(
       builder, 
       nir_dest_num_components(intrin->dest),
-      nir_dest_bit_size(intrin->dest),
+      intrin->dest.ssa.bit_size,
       nir_channel(builder, load_desc, 0),
       nir_iadd_imm(builder, offset, base),
       .align_mul = nir_intrinsic_align_mul(intrin),
@@ -596,7 +596,7 @@ kill_undefined_varyings(struct nir_builder *b,
     * the DXIL requirements of writing all position components.
     */
    nir_def *zero = nir_imm_zero(b, nir_dest_num_components(intr->dest),
-                                       nir_dest_bit_size(intr->dest));
+                                       intr->dest.ssa.bit_size);
    nir_def_rewrite_uses(&intr->dest.ssa, zero);
    nir_instr_remove(instr);
    return true;
index 4cee4f6..1aeec90 100644 (file)
@@ -84,7 +84,7 @@ bi_lower_divergent_indirects_impl(nir_builder *b, nir_instr *instr, void *data)
 
    /* Write zero in a funny way to bypass lower_load_const_to_scalar */
    bool has_dest = nir_intrinsic_infos[intr->intrinsic].has_dest;
-   unsigned size = has_dest ? nir_dest_bit_size(intr->dest) : 32;
+   unsigned size = has_dest ? intr->dest.ssa.bit_size : 32;
    nir_def *zero = has_dest ? nir_imm_zero(b, 1, size) : NULL;
    nir_def *zeroes[4] = {zero, zero, zero, zero};
    nir_def *res =
index 54dfdd3..15944cd 100644 (file)
@@ -392,7 +392,7 @@ bi_copy_component(bi_builder *b, nir_intrinsic_instr *instr, bi_index tmp)
    unsigned component = nir_intrinsic_component(instr);
    unsigned nr = instr->num_components;
    unsigned total = nr + component;
-   unsigned bitsize = nir_dest_bit_size(instr->dest);
+   unsigned bitsize = instr->dest.ssa.bit_size;
 
    assert(total <= 4 && "should be vec4");
    bi_emit_cached_split(b, tmp, total * bitsize);
@@ -404,7 +404,7 @@ bi_copy_component(bi_builder *b, nir_intrinsic_instr *instr, bi_index tmp)
    unsigned channels[] = {component, component + 1, component + 2};
 
    bi_make_vec_to(b, bi_def_index(&instr->dest.ssa), srcs, channels, nr,
-                  nir_dest_bit_size(instr->dest));
+                  instr->dest.ssa.bit_size);
 }
 
 static void
@@ -502,7 +502,7 @@ bi_emit_load_vary(bi_builder *b, nir_intrinsic_instr *instr)
    bi_index dest =
       (component == 0) ? bi_def_index(&instr->dest.ssa) : bi_temp(b->shader);
 
-   unsigned sz = nir_dest_bit_size(instr->dest);
+   unsigned sz = instr->dest.ssa.bit_size;
 
    if (smooth) {
       nir_intrinsic_instr *parent = nir_src_as_intrinsic(instr->src[0]);
@@ -1050,7 +1050,7 @@ bi_emit_load_ubo(bi_builder *b, nir_intrinsic_instr *instr)
    bi_index dyn_offset = bi_src_index(offset);
    uint32_t const_offset = offset_is_const ? nir_src_as_uint(*offset) : 0;
 
-   bi_load_ubo_to(b, instr->num_components * nir_dest_bit_size(instr->dest),
+   bi_load_ubo_to(b, instr->num_components * instr->dest.ssa.bit_size,
                   bi_def_index(&instr->dest.ssa),
                   offset_is_const ? bi_imm_u32(const_offset) : dyn_offset,
                   bi_src_index(&instr->src[0]));
@@ -1067,7 +1067,7 @@ bi_emit_load_push_constant(bi_builder *b, nir_intrinsic_instr *instr)
    assert((base & 3) == 0 && "unaligned push constants");
 
    unsigned bits =
-      nir_dest_bit_size(instr->dest) * nir_dest_num_components(instr->dest);
+      instr->dest.ssa.bit_size * nir_dest_num_components(instr->dest);
 
    unsigned n = DIV_ROUND_UP(bits, 32);
    assert(n <= 4);
@@ -1124,7 +1124,7 @@ static void
 bi_emit_load(bi_builder *b, nir_intrinsic_instr *instr, enum bi_seg seg)
 {
    int16_t offset = 0;
-   unsigned bits = instr->num_components * nir_dest_bit_size(instr->dest);
+   unsigned bits = instr->num_components * instr->dest.ssa.bit_size;
    bi_index dest = bi_def_index(&instr->dest.ssa);
    bi_index addr_lo = bi_extract(b, bi_src_index(&instr->src[0]), 0);
    bi_index addr_hi = bi_addr_high(b, &instr->src[0]);
@@ -1471,7 +1471,7 @@ bi_emit_ld_tile(bi_builder *b, nir_intrinsic_instr *instr)
    bi_index dest = bi_def_index(&instr->dest.ssa);
    nir_alu_type T = nir_intrinsic_dest_type(instr);
    enum bi_register_format regfmt = bi_reg_fmt_for_nir(T);
-   unsigned size = nir_dest_bit_size(instr->dest);
+   unsigned size = instr->dest.ssa.bit_size;
    unsigned nr = instr->num_components;
 
    /* Get the render target */
@@ -2096,7 +2096,7 @@ bi_emit_alu(bi_builder *b, nir_alu_instr *instr)
 {
    bi_index dst = bi_def_index(&instr->dest.dest.ssa);
    unsigned srcs = nir_op_infos[instr->op].num_inputs;
-   unsigned sz = nir_dest_bit_size(instr->dest.dest);
+   unsigned sz = instr->dest.dest.ssa.bit_size;
    unsigned comps = nir_dest_num_components(instr->dest.dest);
    unsigned src_sz = srcs > 0 ? nir_src_bit_size(instr->src[0].src) : 0;
 
@@ -3245,9 +3245,8 @@ bi_emit_texc(bi_builder *b, nir_tex_instr *instr)
       .shadow_or_clamp_disable = instr->is_shadow,
       .array = instr->is_array,
       .dimension = bifrost_tex_format(instr->sampler_dim),
-      .format =
-         bi_texture_format(instr->dest_type | nir_dest_bit_size(instr->dest),
-                           BI_CLAMP_NONE), /* TODO */
+      .format = bi_texture_format(instr->dest_type | instr->dest.ssa.bit_size,
+                                  BI_CLAMP_NONE), /* TODO */
       .mask = 0xF,
    };
 
@@ -3410,7 +3409,7 @@ bi_emit_texc(bi_builder *b, nir_tex_instr *instr)
          dregs[sr_count++] = dregs[i];
    }
 
-   unsigned res_size = nir_dest_bit_size(instr->dest) == 16 ? 2 : 4;
+   unsigned res_size = instr->dest.ssa.bit_size == 16 ? 2 : 4;
 
    bi_index sr = sr_count ? bi_temp(b->shader) : bi_null();
    bi_index dst = bi_temp(b->shader);
@@ -3565,7 +3564,7 @@ bi_emit_tex_valhall(bi_builder *b, nir_tex_instr *instr)
 
    /* Only write the components that we actually read */
    unsigned mask = nir_def_components_read(&instr->dest.ssa);
-   unsigned comps_per_reg = nir_dest_bit_size(instr->dest) == 16 ? 2 : 1;
+   unsigned comps_per_reg = instr->dest.ssa.bit_size == 16 ? 2 : 1;
    unsigned res_size = DIV_ROUND_UP(util_bitcount(mask), comps_per_reg);
 
    enum bi_register_format regfmt = bi_reg_fmt_for_nir(instr->dest_type);
@@ -3619,7 +3618,7 @@ bi_emit_tex_valhall(bi_builder *b, nir_tex_instr *instr)
 
    bi_make_vec_to(b, bi_def_index(&instr->dest.ssa), unpacked, comps,
                   nir_dest_num_components(instr->dest),
-                  nir_dest_bit_size(instr->dest));
+                  instr->dest.ssa.bit_size);
 }
 
 /* Simple textures ops correspond to NIR tex or txl with LOD = 0 on 2D/cube
@@ -3637,13 +3636,12 @@ bi_emit_texs(bi_builder *b, nir_tex_instr *instr)
       bi_index face, s, t;
       bi_emit_cube_coord(b, coords, &face, &s, &t);
 
-      bi_texs_cube_to(b, nir_dest_bit_size(instr->dest),
+      bi_texs_cube_to(b, instr->dest.ssa.bit_size,
                       bi_def_index(&instr->dest.ssa), s, t, face,
                       instr->sampler_index, instr->texture_index);
    } else {
-      bi_texs_2d_to(b, nir_dest_bit_size(instr->dest),
-                    bi_def_index(&instr->dest.ssa), bi_extract(b, coords, 0),
-                    bi_extract(b, coords, 1),
+      bi_texs_2d_to(b, instr->dest.ssa.bit_size, bi_def_index(&instr->dest.ssa),
+                    bi_extract(b, coords, 0), bi_extract(b, coords, 1),
                     instr->op != nir_texop_tex, /* zero LOD */
                     instr->sampler_index, instr->texture_index);
    }
@@ -4234,7 +4232,7 @@ bi_vectorize_filter(const nir_instr *instr, const void *data)
    }
 
    /* Vectorized instructions cannot write more than 32-bit */
-   int dst_bit_size = nir_dest_bit_size(alu->dest.dest);
+   int dst_bit_size = alu->dest.dest.ssa.bit_size;
    if (dst_bit_size == 16)
       return 2;
    else
@@ -4627,7 +4625,7 @@ bi_lower_load_output(nir_builder *b, nir_instr *instr, UNUSED void *data)
       b, .base = rt, .src_type = nir_intrinsic_dest_type(intr));
 
    nir_def *lowered = nir_load_converted_output_pan(
-      b, nir_dest_num_components(intr->dest), nir_dest_bit_size(intr->dest),
+      b, nir_dest_num_components(intr->dest), intr->dest.ssa.bit_size,
       conversion, .dest_type = nir_intrinsic_dest_type(intr),
       .io_semantics = nir_intrinsic_io_semantics(intr));
 
index bd33a20..9e42e1d 100644 (file)
@@ -223,7 +223,7 @@ midgard_nir_lower_global_load_instr(nir_builder *b, nir_instr *instr,
        intr->intrinsic != nir_intrinsic_load_shared)
       return false;
 
-   unsigned compsz = nir_dest_bit_size(intr->dest);
+   unsigned compsz = intr->dest.ssa.bit_size;
    unsigned totalsz = compsz * nir_dest_num_components(intr->dest);
    /* 8, 16, 32, 64 and 128 bit loads don't need to be lowered */
    if (util_bitcount(totalsz) < 2 && totalsz <= 128)
@@ -286,7 +286,7 @@ mdg_should_scalarize(const nir_instr *instr, const void *_unused)
    if (nir_src_bit_size(alu->src[0].src) == 64)
       return true;
 
-   if (nir_dest_bit_size(alu->dest.dest) == 64)
+   if (alu->dest.dest.ssa.bit_size == 64)
       return true;
 
    switch (alu->op) {
@@ -319,7 +319,7 @@ midgard_vectorize_filter(const nir_instr *instr, const void *data)
 
    const nir_alu_instr *alu = nir_instr_as_alu(instr);
    int src_bit_size = nir_src_bit_size(alu->src[0].src);
-   int dst_bit_size = nir_dest_bit_size(alu->dest.dest);
+   int dst_bit_size = alu->dest.dest.ssa.bit_size;
 
    if (src_bit_size == 64 || dst_bit_size == 64)
       return 2;
@@ -647,7 +647,7 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
    bool flip_src12 = false;
 
    ASSERTED unsigned src_bitsize = nir_src_bit_size(instr->src[0].src);
-   unsigned dst_bitsize = nir_dest_bit_size(instr->dest.dest);
+   unsigned dst_bitsize = instr->dest.dest.ssa.bit_size;
 
    enum midgard_roundmode roundmode = MIDGARD_RTE;
 
@@ -991,7 +991,7 @@ mir_set_intr_mask(nir_instr *instr, midgard_instruction *ins, bool is_read)
       nir_mask = mask_of(nir_intrinsic_dest_components(intr));
 
       /* Extension is mandatory for 8/16-bit loads */
-      dsize = nir_dest_bit_size(intr->dest) == 64 ? 64 : 32;
+      dsize = intr->dest.ssa.bit_size == 64 ? 64 : 32;
    } else {
       nir_mask = nir_intrinsic_write_mask(intr);
       dsize = OP_IS_COMMON_STORE(ins->op) ? nir_src_bit_size(intr->src[0]) : 32;
@@ -1013,10 +1013,9 @@ emit_ubo_read(compiler_context *ctx, nir_instr *instr, unsigned dest,
 {
    midgard_instruction ins;
 
-   unsigned dest_size =
-      (instr->type == nir_instr_type_intrinsic)
-         ? nir_dest_bit_size(nir_instr_as_intrinsic(instr)->dest)
-         : 32;
+   unsigned dest_size = (instr->type == nir_instr_type_intrinsic)
+                           ? nir_instr_as_intrinsic(instr)->dest.ssa.bit_size
+                           : 32;
 
    unsigned bitsize = dest_size * nr_comps;
 
@@ -1072,7 +1071,7 @@ emit_global(compiler_context *ctx, nir_instr *instr, bool is_read,
    nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
    if (is_read) {
       unsigned bitsize =
-         nir_dest_bit_size(intr->dest) * nir_dest_num_components(intr->dest);
+         intr->dest.ssa.bit_size * nir_dest_num_components(intr->dest);
 
       switch (bitsize) {
       case 8:
@@ -1099,7 +1098,7 @@ emit_global(compiler_context *ctx, nir_instr *instr, bool is_read,
       /* For anything not aligned on 32bit, make sure we write full
        * 32 bits registers. */
       if (bitsize & 31) {
-         unsigned comps_per_32b = 32 / nir_dest_bit_size(intr->dest);
+         unsigned comps_per_32b = 32 / intr->dest.ssa.bit_size;
 
          for (unsigned c = 0; c < 4 * comps_per_32b; c += comps_per_32b) {
             if (!(ins.mask & BITFIELD_RANGE(c, comps_per_32b)))
@@ -1555,7 +1554,7 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
          v_mov(nir_reg_index(handle), nir_def_index(&instr->dest.ssa));
 
       ins.dest_type = ins.src_types[1] =
-         nir_type_uint | nir_dest_bit_size(instr->dest);
+         nir_type_uint | instr->dest.ssa.bit_size;
 
       ins.mask = BITFIELD_MASK(nir_dest_num_components(instr->dest));
       emit_mir_instruction(ctx, ins);
@@ -1643,7 +1642,7 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
          emit_global(ctx, &instr->instr, true, reg, src_offset, seg);
       } else if (ctx->stage == MESA_SHADER_FRAGMENT && !ctx->inputs->is_blend) {
          emit_varying_read(ctx, reg, offset, nr_comp, component,
-                           indirect_offset, t | nir_dest_bit_size(instr->dest),
+                           indirect_offset, t | instr->dest.ssa.bit_size,
                            is_flat);
       } else if (ctx->inputs->is_blend) {
          /* ctx->blend_input will be precoloured to r0/r2, where
@@ -1708,7 +1707,7 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
    case nir_intrinsic_load_output: {
       reg = nir_def_index(&instr->dest.ssa);
 
-      unsigned bits = nir_dest_bit_size(instr->dest);
+      unsigned bits = instr->dest.ssa.bit_size;
 
       midgard_instruction ld;
       if (bits == 16)
index 4ac35f4..4925bb8 100644 (file)
@@ -60,7 +60,7 @@ nir_fuse_io_16(nir_shader *shader)
             if (intr->intrinsic != nir_intrinsic_load_interpolated_input)
                continue;
 
-            if (nir_dest_bit_size(intr->dest) != 32)
+            if (intr->dest.ssa.bit_size != 32)
                continue;
 
             /* We swizzle at a 32-bit level so need a multiple of 2. We could
index 5785d77..72682dd 100644 (file)
@@ -52,7 +52,7 @@ nir_lower_64bit_intrin_instr(nir_builder *b, nir_instr *instr, void *data)
       return false;
    }
 
-   if (nir_dest_bit_size(intr->dest) != 64)
+   if (intr->dest.ssa.bit_size != 64)
       return false;
 
    b->cursor = nir_after_instr(instr);
index cd387e6..8436586 100644 (file)
@@ -547,7 +547,7 @@ pan_lower_fb_load(nir_builder *b, nir_intrinsic_instr *intr,
     * the result is undefined.
     */
 
-   unsigned bits = nir_dest_bit_size(intr->dest);
+   unsigned bits = intr->dest.ssa.bit_size;
 
    nir_alu_type src_type =
       nir_alu_type_get_base_type(pan_unpacked_type_for_format(desc));
index c7c68dc..531e98c 100644 (file)
@@ -55,8 +55,8 @@ pan_lower_sample_pos_impl(struct nir_builder *b, nir_instr *instr,
    nir_def *decoded = nir_fmul_imm(b, nir_i2f16(b, raw), 1.0 / 256.0);
 
    /* Make NIR validator happy */
-   if (decoded->bit_size != nir_dest_bit_size(intr->dest))
-      decoded = nir_f2fN(b, decoded, nir_dest_bit_size(intr->dest));
+   if (decoded->bit_size != intr->dest.ssa.bit_size)
+      decoded = nir_f2fN(b, decoded, intr->dest.ssa.bit_size);
 
    nir_def_rewrite_uses(&intr->dest.ssa, decoded);
    return true;
index 5ba814c..27cf9a0 100644 (file)
@@ -49,10 +49,10 @@ static nir_def *
 load_sysval_from_ubo(nir_builder *b, nir_intrinsic_instr *intr, unsigned offset)
 {
    return nir_load_ubo(
-      b, nir_dest_num_components(intr->dest), nir_dest_bit_size(intr->dest),
+      b, nir_dest_num_components(intr->dest), intr->dest.ssa.bit_size,
       nir_imm_int(b, PANVK_SYSVAL_UBO_INDEX), nir_imm_int(b, offset),
-      .align_mul = nir_dest_bit_size(intr->dest) / 8, .align_offset = 0,
-      .range_base = offset, .range = nir_dest_bit_size(intr->dest) / 8);
+      .align_mul = intr->dest.ssa.bit_size / 8, .align_offset = 0,
+      .range_base = offset, .range = intr->dest.ssa.bit_size / 8);
 }
 
 struct sysval_options {
@@ -190,9 +190,9 @@ panvk_lower_load_push_constant(nir_builder *b, nir_instr *instr, void *data)
 
    b->cursor = nir_before_instr(instr);
    nir_def *ubo_load = nir_load_ubo(
-      b, nir_dest_num_components(intr->dest), nir_dest_bit_size(intr->dest),
+      b, nir_dest_num_components(intr->dest), intr->dest.ssa.bit_size,
       nir_imm_int(b, PANVK_PUSH_CONST_UBO_INDEX), intr->src[0].ssa,
-      .align_mul = nir_dest_bit_size(intr->dest) / 8, .align_offset = 0,
+      .align_mul = intr->dest.ssa.bit_size / 8, .align_offset = 0,
       .range_base = nir_intrinsic_base(intr),
       .range = nir_intrinsic_range(intr));
    nir_def_rewrite_uses(&intr->dest.ssa, ubo_load);
index bed9baa..85dfc95 100644 (file)
@@ -271,7 +271,7 @@ create_plane_tex_instr_implicit(struct ycbcr_state *state,
    tex->is_array = old_tex->is_array;
 
    nir_def_init(&tex->instr, &tex->dest.ssa, old_tex->dest.ssa.num_components,
-                nir_dest_bit_size(old_tex->dest));
+                old_tex->dest.ssa.bit_size);
    nir_builder_instr_insert(b, &tex->instr);
 
    return &tex->dest.ssa;