From: Faith Ekstrand
Date: Mon, 14 Aug 2023 16:56:00 +0000 (-0500)
Subject: nir: Drop nir_dest
X-Git-Tag: upstream/23.3.3~3329
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=4695bebc7951356c913b4695f36863ba4544e816;p=platform%2Fupstream%2Fmesa.git

nir: Drop nir_dest

Instead, we replace every use of it with nir_def. Most of this commit
was generated by sed:

    sed -i -e 's/dest.ssa/def/g' src/**/*.h src/**/*.c src/**/*.cpp

A few manual fixups were required in lima and the nir_legacy code.

Acked-by: Alyssa Rosenzweig
Part-of:
---

diff --git a/src/amd/common/ac_nir.c b/src/amd/common/ac_nir.c
index 2e4a09e..c7425a4 100644
--- a/src/amd/common/ac_nir.c
+++ b/src/amd/common/ac_nir.c
@@ -134,7 +134,7 @@ lower_intrinsic_to_arg(nir_builder *b, nir_instr *instr, void *state)
    }
 
    assert(replacement);
-   nir_def_rewrite_uses(&intrin->dest.ssa, replacement);
+   nir_def_rewrite_uses(&intrin->def, replacement);
    nir_instr_remove(&intrin->instr);
    return true;
 }
diff --git a/src/amd/common/ac_nir_lower_esgs_io_to_mem.c b/src/amd/common/ac_nir_lower_esgs_io_to_mem.c
index ffe6557..819c4ca 100644
--- a/src/amd/common/ac_nir_lower_esgs_io_to_mem.c
+++ b/src/amd/common/ac_nir_lower_esgs_io_to_mem.c
@@ -272,12 +272,12 @@ lower_gs_per_vertex_input_load(nir_builder *b,
    nir_def *off = gs_per_vertex_input_offset(b, st, intrin);
 
    if (st->gfx_level >= GFX9)
-      return nir_load_shared(b, intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size, off);
+      return nir_load_shared(b, intrin->def.num_components, intrin->def.bit_size, off);
 
    unsigned wave_size = 64u; /* GFX6-8 only support wave64 */
    nir_def *ring = nir_load_ring_esgs_amd(b);
    return emit_split_buffer_load(b, ring, off, nir_imm_zero(b, 1, 32), 4u * wave_size,
-                                 intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size);
+                                 intrin->def.num_components, intrin->def.bit_size);
 }
 
 static bool
diff --git a/src/amd/common/ac_nir_lower_global_access.c b/src/amd/common/ac_nir_lower_global_access.c
index d085e9b..8c05e9f 100644
--- a/src/amd/common/ac_nir_lower_global_access.c
+++ b/src/amd/common/ac_nir_lower_global_access.c
@@ -98,8 +98,8 @@ process_instr(nir_builder *b, nir_instr *instr, void *_)
    new_intrin->num_components = intrin->num_components;
 
    if (op != nir_intrinsic_store_global_amd)
-      nir_def_init(&new_intrin->instr, &new_intrin->dest.ssa,
-                   intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size);
+      nir_def_init(&new_intrin->instr, &new_intrin->def,
+                   intrin->def.num_components, intrin->def.bit_size);
 
    unsigned num_src = nir_intrinsic_infos[intrin->intrinsic].num_srcs;
    for (unsigned i = 0; i < num_src; i++)
@@ -121,7 +121,7 @@ process_instr(nir_builder *b, nir_instr *instr, void *_)
    nir_builder_instr_insert(b, &new_intrin->instr);
 
    if (op != nir_intrinsic_store_global_amd)
-      nir_def_rewrite_uses(&intrin->dest.ssa, &new_intrin->dest.ssa);
+      nir_def_rewrite_uses(&intrin->def, &new_intrin->def);
    nir_instr_remove(&intrin->instr);
 
    return true;
diff --git a/src/amd/common/ac_nir_lower_image_opcodes_cdna.c b/src/amd/common/ac_nir_lower_image_opcodes_cdna.c
index b999d3e..b6b4ff4 100644
--- a/src/amd/common/ac_nir_lower_image_opcodes_cdna.c
+++ b/src/amd/common/ac_nir_lower_image_opcodes_cdna.c
@@ -299,7 +299,7 @@ static bool lower_image_opcodes(nir_builder *b, nir_instr *instr, void *data)
       nir_def *desc = NULL, *result = NULL;
       ASSERTED const char *intr_name;
 
-      nir_def *dst = &intr->dest.ssa;
+      nir_def *dst = &intr->def;
       b->cursor = nir_before_instr(instr);
 
       switch (intr->intrinsic) {
@@ -357,7 +357,7 @@ static bool lower_image_opcodes(nir_builder *b, nir_instr *instr, void *data)
       case nir_intrinsic_image_load:
       case nir_intrinsic_image_deref_load:
       case nir_intrinsic_bindless_image_load:
-         result = emulated_image_load(b, intr->dest.ssa.num_components, intr->dest.ssa.bit_size,
+         result = emulated_image_load(b, intr->def.num_components, intr->def.bit_size,
                                       desc, intr->src[1].ssa, access, dim, is_array, true);
          nir_def_rewrite_uses_after(dst, result, instr);
          nir_instr_remove(instr);
@@ -378,7 +378,7 @@ static bool lower_image_opcodes(nir_builder *b, nir_instr *instr, void *data)
       nir_tex_instr *new_tex;
       nir_def *coord = NULL, *desc = NULL, *sampler_desc = NULL, *result = NULL;
 
-      nir_def *dst = &tex->dest.ssa;
+      nir_def *dst = &tex->def;
       b->cursor = nir_before_instr(instr);
 
       switch (tex->op) {
@@ -400,10 +400,10 @@ static bool lower_image_opcodes(nir_builder *b, nir_instr *instr, void *data)
            new_tex->dest_type = nir_type_int32;
            nir_src_copy(&new_tex->src[0].src, &tex->src[i].src, &new_tex->instr);
            new_tex->src[0].src_type = tex->src[i].src_type;
-           nir_def_init(&new_tex->instr, &new_tex->dest.ssa,
+           nir_def_init(&new_tex->instr, &new_tex->def,
                         nir_tex_instr_dest_size(new_tex), 32);
            nir_builder_instr_insert(b, &new_tex->instr);
-           desc = &new_tex->dest.ssa;
+           desc = &new_tex->def;
            break;
 
         case nir_tex_src_sampler_deref:
@@ -419,10 +419,10 @@ static bool lower_image_opcodes(nir_builder *b, nir_instr *instr, void *data)
            new_tex->dest_type = nir_type_int32;
            nir_src_copy(&new_tex->src[0].src, &tex->src[i].src, &new_tex->instr);
            new_tex->src[0].src_type = tex->src[i].src_type;
-           nir_def_init(&new_tex->instr, &new_tex->dest.ssa,
+           nir_def_init(&new_tex->instr, &new_tex->def,
                         nir_tex_instr_dest_size(new_tex), 32);
            nir_builder_instr_insert(b, &new_tex->instr);
-           sampler_desc = &new_tex->dest.ssa;
+           sampler_desc = &new_tex->def;
            break;
 
         case nir_tex_src_coord:
@@ -443,7 +443,7 @@ static bool lower_image_opcodes(nir_builder *b, nir_instr *instr, void *data)
       switch (tex->op) {
       case nir_texop_txf:
-         result = emulated_image_load(b, tex->dest.ssa.num_components, tex->dest.ssa.bit_size,
+         result = emulated_image_load(b, tex->def.num_components, tex->def.bit_size,
                                       desc, coord,
                                       ACCESS_RESTRICT | ACCESS_NON_WRITEABLE | ACCESS_CAN_REORDER,
                                       tex->sampler_dim, tex->is_array, true);
@@ -453,7 +453,7 @@ static bool lower_image_opcodes(nir_builder *b, nir_instr *instr, void *data)
       case nir_texop_tex:
       case nir_texop_txl:
-         result = emulated_tex_level_zero(b, tex->dest.ssa.num_components, tex->dest.ssa.bit_size,
+         result = emulated_tex_level_zero(b, tex->def.num_components, tex->def.bit_size,
                                           desc, sampler_desc, coord, tex->sampler_dim, tex->is_array);
          nir_def_rewrite_uses_after(dst, result, instr);
          nir_instr_remove(instr);
diff --git a/src/amd/common/ac_nir_lower_ngg.c b/src/amd/common/ac_nir_lower_ngg.c
index 4d31e86..d98833e 100644
--- a/src/amd/common/ac_nir_lower_ngg.c
+++ b/src/amd/common/ac_nir_lower_ngg.c
@@ -1164,15 +1164,15 @@ find_reusable_ssa_def(nir_instr *instr)
       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
       if (!nir_intrinsic_can_reorder(intrin) ||
           !nir_intrinsic_infos[intrin->intrinsic].has_dest ||
-          intrin->dest.ssa.divergent)
-         return NULL;
-      return &intrin->dest.ssa;
+          intrin->def.divergent)
+         return NULL;
+      return &intrin->def;
    }
    case nir_instr_type_phi: {
       nir_phi_instr *phi = nir_instr_as_phi(instr);
-      if (phi->dest.ssa.divergent)
-         return NULL;
-      return &phi->dest.ssa;
+      if (phi->def.divergent)
+         return NULL;
+      return &phi->def;
    }
    default:
       return NULL;
@@ -3913,8 +3913,8 @@ ms_load_arrayed_output_intrin(nir_builder *b,
    unsigned location = nir_intrinsic_io_semantics(intrin).location;
    unsigned component_offset = nir_intrinsic_component(intrin);
 
-   unsigned bit_size = intrin->dest.ssa.bit_size;
-   unsigned num_components = intrin->dest.ssa.num_components;
+   unsigned bit_size = intrin->def.bit_size;
+   unsigned num_components = intrin->def.num_components;
    unsigned load_bit_size = MAX2(bit_size, 32);
 
    nir_def *load =
diff --git a/src/amd/common/ac_nir_lower_ps.c b/src/amd/common/ac_nir_lower_ps.c
index c5822e4..f80fe20 100644
--- a/src/amd/common/ac_nir_lower_ps.c
+++ b/src/amd/common/ac_nir_lower_ps.c
@@ -187,7 +187,7 @@ lower_ps_load_barycentric(nir_builder *b, nir_intrinsic_instr *intrin, lower_ps_
    b->cursor = nir_before_instr(&intrin->instr);
 
    nir_def *replacement = nir_load_var(b, var);
-   nir_def_rewrite_uses(&intrin->dest.ssa, replacement);
+   nir_def_rewrite_uses(&intrin->def, replacement);
    nir_instr_remove(&intrin->instr);
 
    return true;
@@ -255,7 +255,7 @@ lower_ps_load_sample_mask_in(nir_builder *b, nir_intrinsic_instr *intrin, lower_
    nir_def *sample_mask = nir_load_sample_mask_in(b);
    nir_def *replacement = nir_iand(b, sample_mask, submask);
 
-   nir_def_rewrite_uses(&intrin->dest.ssa, replacement);
+   nir_def_rewrite_uses(&intrin->def, replacement);
    nir_instr_remove(&intrin->instr);
 
    return true;
diff --git a/src/amd/common/ac_nir_lower_resinfo.c b/src/amd/common/ac_nir_lower_resinfo.c
index 55f3033..09e1aa2 100644
--- a/src/amd/common/ac_nir_lower_resinfo.c
+++ b/src/amd/common/ac_nir_lower_resinfo.c
@@ -212,7 +212,7 @@ static bool lower_resinfo(nir_builder *b, nir_instr *instr, void *data)
       bool is_array;
       nir_def *desc = NULL;
 
-      dst = &intr->dest.ssa;
+      dst = &intr->def;
       b->cursor = nir_before_instr(instr);
 
       switch (intr->intrinsic) {
@@ -268,7 +268,7 @@ static bool lower_resinfo(nir_builder *b, nir_instr *instr, void *data)
       nir_def *desc = NULL;
       nir_src *lod = NULL;
 
-      dst = &tex->dest.ssa;
+      dst = &tex->def;
       b->cursor = nir_before_instr(instr);
 
       switch (tex->op) {
@@ -288,10 +288,10 @@ static bool lower_resinfo(nir_builder *b, nir_instr *instr, void *data)
            new_tex->dest_type = nir_type_int32;
            nir_src_copy(&new_tex->src[0].src, &tex->src[i].src, &new_tex->instr);
            new_tex->src[0].src_type = tex->src[i].src_type;
-           nir_def_init(&new_tex->instr, &new_tex->dest.ssa,
+           nir_def_init(&new_tex->instr, &new_tex->def,
                         nir_tex_instr_dest_size(new_tex), 32);
            nir_builder_instr_insert(b, &new_tex->instr);
-           desc = &new_tex->dest.ssa;
+           desc = &new_tex->def;
            break;
 
         case nir_tex_src_lod:
diff --git a/src/amd/common/ac_nir_lower_subdword_loads.c b/src/amd/common/ac_nir_lower_subdword_loads.c
index 5142a1f..50cd145 100644
--- a/src/amd/common/ac_nir_lower_subdword_loads.c
+++ b/src/amd/common/ac_nir_lower_subdword_loads.c
@@ -55,7 +55,7 @@ lower_subdword_loads(nir_builder *b, nir_instr *instr, void *data)
       return false;
    }
 
-   unsigned bit_size = intr->dest.ssa.bit_size;
+   unsigned bit_size = intr->def.bit_size;
    if (bit_size >= 32)
       return false;
 
@@ -70,15 +70,15 @@ lower_subdword_loads(nir_builder *b, nir_instr *instr, void *data)
    nir_src *src_offset = nir_get_io_offset_src(intr);
    nir_def *offset = src_offset->ssa;
-   nir_def *result = &intr->dest.ssa;
+   nir_def *result = &intr->def;
 
    /* Change the load to 32 bits per channel, update the channel count,
    * and increase the declared load alignment.
    */
-   intr->dest.ssa.bit_size = 32;
+   intr->def.bit_size = 32;
 
    if (align_mul == 4 && align_offset == 0) {
-      intr->num_components = intr->dest.ssa.num_components =
+      intr->num_components = intr->def.num_components =
         DIV_ROUND_UP(num_components, comp_per_dword);
      /* Aligned loads. Just bitcast the vector and trim it if there are
@@ -87,7 +87,7 @@ lower_subdword_loads(nir_builder *b, nir_instr *instr, void *data)
      b->cursor = nir_after_instr(instr);
      result = nir_extract_bits(b, &result, 1, 0, num_components, bit_size);
-      nir_def_rewrite_uses_after(&intr->dest.ssa, result,
+      nir_def_rewrite_uses_after(&intr->def, result,
                                 result->parent_instr);
      return true;
   }
@@ -95,7 +95,7 @@ lower_subdword_loads(nir_builder *b, nir_instr *instr, void *data)
   /* Multi-component unaligned loads may straddle the dword boundary.
   * E.g. for 2 components, we need to load an extra dword, and so on.
   */
-   intr->num_components = intr->dest.ssa.num_components =
+   intr->num_components = intr->def.num_components =
      DIV_ROUND_UP(4 - align_mul + align_offset + num_components * component_size, 4);
 
   nir_intrinsic_set_align(intr,
@@ -121,7 +121,7 @@ lower_subdword_loads(nir_builder *b, nir_instr *instr, void *data)
   result = nir_extract_bits(b, &result, 1, comp_offset * bit_size,
                             num_components, bit_size);
-   nir_def_rewrite_uses_after(&intr->dest.ssa, result,
+   nir_def_rewrite_uses_after(&intr->def, result,
                              result->parent_instr);
   return true;
 }
@@ -203,7 +203,7 @@ lower_subdword_loads(nir_builder *b, nir_instr *instr, void *data)
   result = nir_vec(b, elems, intr->num_components);
   result = nir_extract_bits(b, &result, 1, 0, num_components, bit_size);
-   nir_def_rewrite_uses_after(&intr->dest.ssa, result,
+   nir_def_rewrite_uses_after(&intr->def, result,
                              result->parent_instr);
   return true;
 }
diff --git a/src/amd/common/ac_nir_lower_taskmesh_io_to_mem.c b/src/amd/common/ac_nir_lower_taskmesh_io_to_mem.c
index 61225b6..f055ae0 100644
--- a/src/amd/common/ac_nir_lower_taskmesh_io_to_mem.c
+++ b/src/amd/common/ac_nir_lower_taskmesh_io_to_mem.c
@@ -214,8 +214,8 @@ lower_taskmesh_payload_load(nir_builder *b,
                             lower_tsms_io_state *s)
 {
    unsigned base = nir_intrinsic_base(intrin);
-   unsigned num_components = intrin->dest.ssa.num_components;
-   unsigned bit_size = intrin->dest.ssa.bit_size;
+   unsigned num_components = intrin->def.num_components;
+   unsigned bit_size = intrin->def.bit_size;
 
    nir_def *ptr = b->shader->info.stage == MESA_SHADER_TASK ?
diff --git a/src/amd/common/ac_nir_lower_tess_io_to_mem.c b/src/amd/common/ac_nir_lower_tess_io_to_mem.c
index 71cbb53..2fcdd35 100644
--- a/src/amd/common/ac_nir_lower_tess_io_to_mem.c
+++ b/src/amd/common/ac_nir_lower_tess_io_to_mem.c
@@ -402,7 +402,7 @@ lower_hs_per_vertex_input_load(nir_builder *b,
    nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
    nir_def *off = hs_per_vertex_input_lds_offset(b, st, intrin);
-   return nir_load_shared(b, intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size, off);
+   return nir_load_shared(b, intrin->def.num_components, intrin->def.bit_size, off);
 }
 
 static nir_def *
@@ -480,7 +480,7 @@ lower_hs_output_load(nir_builder *b,
                      lower_tess_io_state *st)
 {
    nir_def *off = hs_output_lds_offset(b, st, intrin);
-   return nir_load_shared(b, intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size, off);
+   return nir_load_shared(b, intrin->def.num_components, intrin->def.bit_size, off);
 }
 
 static void
@@ -704,8 +704,8 @@ lower_tes_input_load(nir_builder *b,
    nir_def *zero = nir_imm_int(b, 0);
 
-   return nir_load_buffer_amd(b, intrin->dest.ssa.num_components,
-                              intrin->dest.ssa.bit_size, offchip_ring,
+   return nir_load_buffer_amd(b, intrin->def.num_components,
+                              intrin->def.bit_size, offchip_ring,
                               off, offchip_offset, zero,
                               .access = ACCESS_COHERENT);
 }
diff --git a/src/amd/compiler/aco_instruction_selection.cpp b/src/amd/compiler/aco_instruction_selection.cpp
index 284c9b0..88e588a 100644
--- a/src/amd/compiler/aco_instruction_selection.cpp
+++ b/src/amd/compiler/aco_instruction_selection.cpp
@@ -5449,7 +5449,7 @@ emit_load_frag_shading_rate(isel_context* ctx, Temp dst)
 void
 visit_load_interpolated_input(isel_context* ctx, nir_intrinsic_instr* instr)
 {
-   Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+   Temp dst = get_ssa_temp(ctx, &instr->def);
    Temp coords = get_ssa_temp(ctx, instr->src[0].ssa);
    unsigned idx = nir_intrinsic_base(instr);
    unsigned component = nir_intrinsic_component(instr);
@@ -5457,13 +5457,13 @@ visit_load_interpolated_input(isel_context* ctx, nir_intrinsic_instr* instr)
 
    assert(nir_src_is_const(instr->src[1]) && !nir_src_as_uint(instr->src[1]));
 
-   if (instr->dest.ssa.num_components == 1) {
+   if (instr->def.num_components == 1) {
       emit_interp_instr(ctx, idx, component, coords, dst, prim_mask);
    } else {
      aco_ptr vec(create_instruction(
-         aco_opcode::p_create_vector, Format::PSEUDO, instr->dest.ssa.num_components, 1));
-      for (unsigned i = 0; i < instr->dest.ssa.num_components; i++) {
-         Temp tmp = ctx->program->allocateTmp(instr->dest.ssa.bit_size == 16 ? v2b : v1);
+         aco_opcode::p_create_vector, Format::PSEUDO, instr->def.num_components, 1));
+      for (unsigned i = 0; i < instr->def.num_components; i++) {
+         Temp tmp = ctx->program->allocateTmp(instr->def.bit_size == 16 ? v2b : v1);
         emit_interp_instr(ctx, idx, component + i, coords, tmp, prim_mask);
         vec->operands[i] = Operand(tmp);
      }
@@ -5579,7 +5579,7 @@ void
 visit_load_fs_input(isel_context* ctx, nir_intrinsic_instr* instr)
 {
    Builder bld(ctx->program, ctx->block);
-   Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+   Temp dst = get_ssa_temp(ctx, &instr->def);
    nir_src offset = *nir_get_io_offset_src(instr);
 
    if (!nir_src_is_const(offset) || nir_src_as_uint(offset))
@@ -5594,18 +5594,18 @@ visit_load_fs_input(isel_context* ctx, nir_intrinsic_instr* instr)
    if (instr->intrinsic == nir_intrinsic_load_input_vertex)
       vertex_id = nir_src_as_uint(instr->src[0]);
 
-   if (instr->dest.ssa.num_components == 1 && instr->dest.ssa.bit_size != 64) {
+   if (instr->def.num_components == 1 && instr->def.bit_size != 64) {
      emit_interp_mov_instr(ctx, idx, component, vertex_id, dst, prim_mask);
   } else {
-      unsigned num_components = instr->dest.ssa.num_components;
-      if (instr->dest.ssa.bit_size == 64)
+      unsigned num_components = instr->def.num_components;
+      if (instr->def.bit_size == 64)
        num_components *= 2;
      aco_ptr vec{create_instruction(
        aco_opcode::p_create_vector, Format::PSEUDO, num_components, 1)};
      for (unsigned i = 0; i < num_components; i++) {
        unsigned chan_component = (component + i) % 4;
        unsigned chan_idx = idx + (component + i) / 4;
-         vec->operands[i] = Operand(bld.tmp(instr->dest.ssa.bit_size == 16 ? v2b : v1));
+         vec->operands[i] = Operand(bld.tmp(instr->def.bit_size == 16 ? v2b : v1));
        emit_interp_mov_instr(ctx, chan_idx, chan_component, vertex_id,
                              vec->operands[i].getTemp(), prim_mask);
      }
@@ -5620,7 +5620,7 @@ visit_load_tcs_per_vertex_input(isel_context* ctx, nir_intrinsic_instr* instr)
    assert(ctx->shader->info.stage == MESA_SHADER_TESS_CTRL);
 
    Builder bld(ctx->program, ctx->block);
-   Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+   Temp dst = get_ssa_temp(ctx, &instr->def);
 
    if (load_input_from_temps(ctx, instr, dst))
       return;
@@ -5643,7 +5643,7 @@ visit_load_tess_coord(isel_context* ctx, nir_intrinsic_instr* instr)
    assert(ctx->shader->info.stage == MESA_SHADER_TESS_EVAL);
 
    Builder bld(ctx->program, ctx->block);
-   Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+   Temp dst = get_ssa_temp(ctx, &instr->def);
 
    Operand tes_u(get_arg(ctx, ctx->args->tes_u));
    Operand tes_v(get_arg(ctx, ctx->args->tes_v));
@@ -5692,11 +5692,11 @@ load_buffer(isel_context* ctx, unsigned num_components, unsigned component_size,
 void
 visit_load_ubo(isel_context* ctx, nir_intrinsic_instr* instr)
 {
-   Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+   Temp dst = get_ssa_temp(ctx, &instr->def);
    Builder bld(ctx->program, ctx->block);
    Temp rsrc = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
 
-   unsigned size = instr->dest.ssa.bit_size / 8;
+   unsigned size = instr->def.bit_size / 8;
    load_buffer(ctx, instr->num_components, size, dst, rsrc, get_ssa_temp(ctx, instr->src[1].ssa),
                nir_intrinsic_align_mul(instr), nir_intrinsic_align_offset(instr));
 }
@@ -5705,15 +5705,15 @@ void
 visit_load_push_constant(isel_context* ctx, nir_intrinsic_instr* instr)
 {
    Builder bld(ctx->program, ctx->block);
-   Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+   Temp dst = get_ssa_temp(ctx, &instr->def);
    unsigned offset = nir_intrinsic_base(instr);
-   unsigned count = instr->dest.ssa.num_components;
+   unsigned count = instr->def.num_components;
    nir_const_value* index_cv = nir_src_as_const_value(instr->src[0]);
 
-   if (instr->dest.ssa.bit_size == 64)
+   if (instr->def.bit_size == 64)
       count *= 2;
 
-   if (index_cv && instr->dest.ssa.bit_size >= 32) {
+   if (index_cv && instr->def.bit_size >= 32) {
      unsigned start = (offset + index_cv->u32) / 4u;
      uint64_t mask = BITFIELD64_MASK(count) << start;
      if ((ctx->args->inline_push_const_mask | mask) == ctx->args->inline_push_const_mask &&
@@ -5743,12 +5743,12 @@ visit_load_push_constant(isel_context* ctx, nir_intrinsic_instr* instr)
    bool trim = false;
    bool aligned = true;
 
-   if (instr->dest.ssa.bit_size == 8) {
+   if (instr->def.bit_size == 8) {
      aligned = index_cv && (offset + index_cv->u32) % 4 == 0;
      bool fits_in_dword = count == 1 || (index_cv && ((offset + index_cv->u32) % 4 + count) <= 4);
      if (!aligned)
        vec = fits_in_dword ? bld.tmp(s1) : bld.tmp(s2);
-   } else if (instr->dest.ssa.bit_size == 16) {
+   } else if (instr->def.bit_size == 16) {
      aligned = index_cv && (offset + index_cv->u32) % 4 == 0;
      if (!aligned)
        vec = count == 4 ? bld.tmp(s4) : count > 1 ? bld.tmp(s2) : bld.tmp(s1);
@@ -5786,13 +5786,13 @@ visit_load_push_constant(isel_context* ctx, nir_intrinsic_instr* instr)
      bld.pseudo(aco_opcode::p_create_vector, Definition(dst), emit_extract_vector(ctx, vec, 0, rc),
                 emit_extract_vector(ctx, vec, 1, rc), emit_extract_vector(ctx, vec, 2, rc));
   }
-   emit_split_vector(ctx, dst, instr->dest.ssa.num_components);
+   emit_split_vector(ctx, dst, instr->def.num_components);
 }
 
 void
 visit_load_constant(isel_context* ctx, nir_intrinsic_instr* instr)
 {
-   Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+   Temp dst = get_ssa_temp(ctx, &instr->def);
 
    Builder bld(ctx->program, ctx->block);
 
@@ -5823,7 +5823,7 @@ visit_load_constant(isel_context* ctx, nir_intrinsic_instr* instr)
                          Operand::c32(ctx->constant_data_offset)),
                Operand::c32(MIN2(base + range, ctx->shader->constant_data_size)),
                Operand::c32(desc_type));
-   unsigned size = instr->dest.ssa.bit_size / 8;
+   unsigned size = instr->def.bit_size / 8;
    // TODO: get alignment information for subdword constants
    load_buffer(ctx, instr->num_components, size, dst, rsrc, offset, size, 0);
 }
@@ -5955,7 +5955,7 @@ void
 visit_bvh64_intersect_ray_amd(isel_context* ctx, nir_intrinsic_instr* instr)
 {
    Builder bld(ctx->program, ctx->block);
-   Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+   Temp dst = get_ssa_temp(ctx, &instr->def);
    Temp resource = get_ssa_temp(ctx, instr->src[0].ssa);
    Temp node = get_ssa_temp(ctx, instr->src[1].ssa);
    Temp tmax = get_ssa_temp(ctx, instr->src[2].ssa);
@@ -5988,7 +5988,7 @@ visit_bvh64_intersect_ray_amd(isel_context* ctx, nir_intrinsic_instr* instr)
    mimg->unrm = true;
    mimg->r128 = true;
 
-   emit_split_vector(ctx, dst, instr->dest.ssa.num_components);
+   emit_split_vector(ctx, dst, instr->def.num_components);
 }
 
 static std::vector
@@ -6123,19 +6123,18 @@ visit_image_load(isel_context* ctx, nir_intrinsic_instr* instr)
    const enum glsl_sampler_dim dim = nir_intrinsic_image_dim(instr);
    bool is_array = nir_intrinsic_image_array(instr);
    bool is_sparse = instr->intrinsic == nir_intrinsic_bindless_image_sparse_load;
-   Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+   Temp dst = get_ssa_temp(ctx, &instr->def);
 
    memory_sync_info sync = get_memory_sync_info(instr, storage_image, 0);
    unsigned access = nir_intrinsic_access(instr);
 
-   unsigned result_size = instr->dest.ssa.num_components - is_sparse;
-   unsigned expand_mask =
-      nir_def_components_read(&instr->dest.ssa) & u_bit_consecutive(0, result_size);
+   unsigned result_size = instr->def.num_components - is_sparse;
+   unsigned expand_mask = nir_def_components_read(&instr->def) & u_bit_consecutive(0, result_size);
    expand_mask = MAX2(expand_mask, 1); /* this can be zero in the case of sparse image loads */
    if (dim == GLSL_SAMPLER_DIM_BUF)
      expand_mask = (1u << util_last_bit(expand_mask)) - 1u;
 
    unsigned dmask = expand_mask;
-   if (instr->dest.ssa.bit_size == 64) {
+   if (instr->def.bit_size == 64) {
      expand_mask &= 0x9;
      /* only R64_UINT and R64_SINT supported. x is in xy of the result, w in zw */
      dmask = ((expand_mask & 0x1) ? 0x3 : 0) | ((expand_mask & 0x8) ? 0xc : 0);
@@ -6143,7 +6142,7 @@ visit_image_load(isel_context* ctx, nir_intrinsic_instr* instr)
    if (is_sparse)
       expand_mask |= 1 << result_size;
 
-   bool d16 = instr->dest.ssa.bit_size == 16;
+   bool d16 = instr->def.bit_size == 16;
    assert(!d16 || !is_sparse);
 
    unsigned num_bytes = util_bitcount(dmask) * (d16 ? 2 : 4) + is_sparse * 4;
@@ -6227,7 +6226,7 @@ visit_image_load(isel_context* ctx, nir_intrinsic_instr* instr)
      }
   }
 
-   if (is_sparse && instr->dest.ssa.bit_size == 64) {
+   if (is_sparse && instr->def.bit_size == 64) {
      /* The result components are 64-bit but the sparse residency code is
      * 32-bit. So add a zero to the end so expand_vector() works correctly.
      */
      tmp = bld.pseudo(aco_opcode::p_create_vector, bld.def(RegType::vgpr, tmp.size() + 1), tmp,
                       Operand::zero());
   }
 
-   expand_vector(ctx, tmp, dst, instr->dest.ssa.num_components, expand_mask,
-                 instr->dest.ssa.bit_size == 64);
+   expand_vector(ctx, tmp, dst, instr->def.num_components, expand_mask, instr->def.bit_size == 64);
 }
 
 void
@@ -6443,7 +6441,7 @@ translate_buffer_image_atomic_op(const nir_atomic_op op, aco_opcode* buf_op, aco
 void
 visit_image_atomic(isel_context* ctx, nir_intrinsic_instr* instr)
 {
-   bool return_previous = !nir_def_is_unused(&instr->dest.ssa);
+   bool return_previous = !nir_def_is_unused(&instr->def);
    const enum glsl_sampler_dim dim = nir_intrinsic_image_dim(instr);
    bool is_array = nir_intrinsic_image_array(instr);
    Builder bld(ctx->program, ctx->block);
@@ -6462,7 +6460,7 @@ visit_image_atomic(isel_context* ctx, nir_intrinsic_instr* instr)
      data = bld.pseudo(aco_opcode::p_create_vector, bld.def(is_64bit ? v4 : v2),
                        get_ssa_temp(ctx, instr->src[4].ssa), data);
 
-   Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+   Temp dst = get_ssa_temp(ctx, &instr->def);
    memory_sync_info sync = get_memory_sync_info(instr, storage_image, semantic_atomicrmw);
 
    if (dim == GLSL_SAMPLER_DIM_BUF) {
@@ -6520,12 +6518,12 @@ visit_load_ssbo(isel_context* ctx, nir_intrinsic_instr* instr)
    Builder bld(ctx->program, ctx->block);
    unsigned num_components = instr->num_components;
 
-   Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+   Temp dst = get_ssa_temp(ctx, &instr->def);
    Temp rsrc = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
 
    unsigned access = nir_intrinsic_access(instr);
    bool glc = access & (ACCESS_VOLATILE | ACCESS_COHERENT);
-   unsigned size = instr->dest.ssa.bit_size / 8;
+   unsigned size = instr->def.bit_size / 8;
 
    bool allow_smem = access & ACCESS_CAN_REORDER;
 
@@ -6585,7 +6583,7 @@ void
 visit_atomic_ssbo(isel_context* ctx, nir_intrinsic_instr* instr)
 {
    Builder bld(ctx->program, ctx->block);
-   bool return_previous = !nir_def_is_unused(&instr->dest.ssa);
+   bool return_previous = !nir_def_is_unused(&instr->def);
    Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[2].ssa));
 
    const nir_atomic_op nir_op = nir_intrinsic_atomic_op(instr);
@@ -6600,9 +6598,9 @@ visit_atomic_ssbo(isel_context* ctx, nir_intrinsic_instr* instr)
 
    Temp offset = get_ssa_temp(ctx, instr->src[1].ssa);
    Temp rsrc = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
-   Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+   Temp dst = get_ssa_temp(ctx, &instr->def);
 
-   aco_opcode op = instr->dest.ssa.bit_size == 32 ? op32 : op64;
+   aco_opcode op = instr->def.bit_size == 32 ? op32 : op64;
    aco_ptr mubuf{
       create_instruction(op, Format::MUBUF, 4, return_previous ? 1 : 0)};
    mubuf->operands[0] = Operand(rsrc);
@@ -6647,13 +6645,13 @@ visit_load_global(isel_context* ctx, nir_intrinsic_instr* instr)
 {
    Builder bld(ctx->program, ctx->block);
    unsigned num_components = instr->num_components;
-   unsigned component_size = instr->dest.ssa.bit_size / 8;
+   unsigned component_size = instr->def.bit_size / 8;
 
    Temp addr, offset;
    uint32_t const_offset;
    parse_global(ctx, instr, &addr, &const_offset, &offset);
 
-   LoadEmitInfo info = {Operand(addr), get_ssa_temp(ctx, &instr->dest.ssa), num_components,
+   LoadEmitInfo info = {Operand(addr), get_ssa_temp(ctx, &instr->def), num_components,
                         component_size};
    if (offset.id()) {
       info.resource = addr;
@@ -6787,7 +6785,7 @@ void
 visit_global_atomic(isel_context* ctx, nir_intrinsic_instr* instr)
 {
    Builder bld(ctx->program, ctx->block);
-   bool return_previous = !nir_def_is_unused(&instr->dest.ssa);
+   bool return_previous = !nir_def_is_unused(&instr->def);
    Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[1].ssa));
 
    const nir_atomic_op nir_op = nir_intrinsic_atomic_op(instr);
@@ -6797,7 +6795,7 @@ visit_global_atomic(isel_context* ctx, nir_intrinsic_instr* instr)
      data = bld.pseudo(aco_opcode::p_create_vector, bld.def(RegType::vgpr, data.size() * 2),
                        get_ssa_temp(ctx, instr->src[2].ssa), data);
 
-   Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+   Temp dst = get_ssa_temp(ctx, &instr->def);
 
    aco_opcode op32, op64;
 
@@ -6864,7 +6862,7 @@ visit_global_atomic(isel_context* ctx, nir_intrinsic_instr* instr)
      default: unreachable("unsupported atomic operation");
      }
 
-      aco_opcode op = instr->dest.ssa.bit_size == 32 ? op32 : op64;
+      aco_opcode op = instr->def.bit_size == 32 ? op32 : op64;
      aco_ptr flat{create_instruction(
        op, global ? Format::GLOBAL : Format::FLAT, 3, return_previous ? 1 : 0)};
      if (addr.regClass() == s2) {
@@ -6895,7 +6893,7 @@ visit_global_atomic(isel_context* ctx, nir_intrinsic_instr* instr)
 
      Temp rsrc = get_gfx6_global_rsrc(bld, addr);
 
-      aco_opcode op = instr->dest.ssa.bit_size == 32 ? op32 : op64;
+      aco_opcode op = instr->def.bit_size == 32 ? op32 : op64;
 
      aco_ptr mubuf{
        create_instruction(op, Format::MUBUF, 4, return_previous ? 1 : 0)};
@@ -6951,7 +6949,7 @@ visit_load_buffer(isel_context* ctx, nir_intrinsic_instr* intrin)
    bool v_offset_zero = nir_src_is_const(intrin->src[1]) && !nir_src_as_uint(intrin->src[1]);
    bool s_offset_zero = nir_src_is_const(intrin->src[2]) && !nir_src_as_uint(intrin->src[2]);
 
-   Temp dst = get_ssa_temp(ctx, &intrin->dest.ssa);
+   Temp dst = get_ssa_temp(ctx, &intrin->def);
    Temp descriptor = bld.as_uniform(get_ssa_temp(ctx, intrin->src[0].ssa));
    Temp v_offset =
       v_offset_zero ? Temp(0, v1) : as_vgpr(ctx, get_ssa_temp(ctx, intrin->src[1].ssa));
@@ -6963,8 +6961,8 @@ visit_load_buffer(isel_context* ctx, nir_intrinsic_instr* intrin)
    bool slc = nir_intrinsic_access(intrin) & ACCESS_NON_TEMPORAL;
 
    unsigned const_offset = nir_intrinsic_base(intrin);
-   unsigned elem_size_bytes = intrin->dest.ssa.bit_size / 8u;
-   unsigned num_components = intrin->dest.ssa.num_components;
+   unsigned elem_size_bytes = intrin->def.bit_size / 8u;
+   unsigned num_components = intrin->def.num_components;
 
    nir_variable_mode mem_mode = nir_intrinsic_memory_modes(intrin);
    memory_sync_info sync(aco_storage_mode_from_nir_mem_mode(mem_mode));
@@ -7061,7 +7059,7 @@ void
 visit_load_smem(isel_context* ctx, nir_intrinsic_instr* instr)
 {
    Builder bld(ctx->program, ctx->block);
-   Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+   Temp dst = get_ssa_temp(ctx, &instr->def);
    Temp base = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
    Temp offset = bld.as_uniform(get_ssa_temp(ctx, instr->src[1].ssa));
 
@@ -7096,7 +7094,7 @@ visit_load_smem(isel_context* ctx, nir_intrinsic_instr* instr)
    } else {
       bld.smem(opcode, Definition(dst), base, offset);
    }
-   emit_split_vector(ctx, dst, instr->dest.ssa.num_components);
+   emit_split_vector(ctx, dst, instr->def.num_components);
 }
 
 sync_scope
@@ -7177,12 +7175,12 @@ void
 visit_load_shared(isel_context* ctx, nir_intrinsic_instr* instr)
 {
    // TODO: implement sparse reads using ds_read2_b32 and nir_def_components_read()
-   Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+   Temp dst = get_ssa_temp(ctx, &instr->def);
    Temp address = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
    Builder bld(ctx->program, ctx->block);
 
-   unsigned elem_size_bytes = instr->dest.ssa.bit_size / 8;
-   unsigned num_components = instr->dest.ssa.num_components;
+   unsigned elem_size_bytes = instr->def.bit_size / 8;
+   unsigned num_components = instr->def.num_components;
    unsigned align = nir_intrinsic_align_mul(instr) ? nir_intrinsic_align(instr) : elem_size_bytes;
    load_lds(ctx, elem_size_bytes, num_components, dst, address, nir_intrinsic_base(instr), align);
 }
@@ -7293,14 +7291,14 @@ visit_shared_atomic(isel_context* ctx, nir_intrinsic_instr* instr)
    default: unreachable("Unhandled shared atomic intrinsic");
    }
 
-   bool return_previous = !nir_def_is_unused(&instr->dest.ssa);
+   bool return_previous = !nir_def_is_unused(&instr->def);
 
    aco_opcode op;
    if (data.size() == 1) {
-      assert(instr->dest.ssa.bit_size == 32);
+      assert(instr->def.bit_size == 32);
      op = return_previous ? op32_rtn : op32;
   } else {
-      assert(instr->dest.ssa.bit_size == 64);
+      assert(instr->def.bit_size == 64);
      op = return_previous ? op64_rtn : op64;
   }
 
@@ -7323,7 +7321,7 @@ visit_shared_atomic(isel_context* ctx, nir_intrinsic_instr* instr)
    ds->operands[num_operands - 1] = m;
    ds->offset0 = offset;
    if (return_previous)
-      ds->definitions[0] = Definition(get_ssa_temp(ctx, &instr->dest.ssa));
+      ds->definitions[0] = Definition(get_ssa_temp(ctx, &instr->def));
    ds->sync = memory_sync_info(storage_shared, semantic_atomicrmw);
 
    if (m.isUndefined())
@@ -7341,7 +7339,7 @@ visit_access_shared2_amd(isel_context* ctx, nir_intrinsic_instr* instr)
 
    assert(bld.program->gfx_level >= GFX7);
 
-   bool is64bit = (is_store ? instr->src[0].ssa->bit_size : instr->dest.ssa.bit_size) == 64;
+   bool is64bit = (is_store ? instr->src[0].ssa->bit_size : instr->def.bit_size) == 64;
    uint8_t offset0 = nir_intrinsic_offset0(instr);
    uint8_t offset1 = nir_intrinsic_offset1(instr);
    bool st64 = nir_intrinsic_st64(instr);
@@ -7358,7 +7356,7 @@ visit_access_shared2_amd(isel_context* ctx, nir_intrinsic_instr* instr)
      Temp data1 = emit_extract_vector(ctx, data, 1, comp_rc);
      ds = bld.ds(op, address, data0, data1, m, offset0, offset1);
   } else {
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
      Definition tmp_dst(dst.type() == RegType::vgpr ? dst : bld.tmp(is64bit ? v4 : v2));
      aco_opcode op = st64 ? (is64bit ? aco_opcode::ds_read2st64_b64 : aco_opcode::ds_read2st64_b32)
                           : (is64bit ? aco_opcode::ds_read2_b64 : aco_opcode::ds_read2_b32);
@@ -7369,7 +7367,7 @@ visit_access_shared2_amd(isel_context* ctx, nir_intrinsic_instr* instr)
      ds->operands.pop_back();
 
    if (!is_store) {
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
      if (dst.type() == RegType::sgpr) {
        emit_split_vector(ctx, ds->definitions[0].getTemp(), dst.size());
        Temp comp[4];
@@ -7433,10 +7431,9 @@ void
 visit_load_scratch(isel_context* ctx, nir_intrinsic_instr* instr)
 {
    Builder bld(ctx->program, ctx->block);
-   Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+   Temp dst = get_ssa_temp(ctx, &instr->def);
 
-   LoadEmitInfo info = {Operand(v1), dst, instr->dest.ssa.num_components,
-                        instr->dest.ssa.bit_size / 8u};
+   LoadEmitInfo info = {Operand(v1), dst, instr->def.num_components, instr->def.bit_size / 8u};
    info.align_mul = nir_intrinsic_align_mul(instr);
    info.align_offset = nir_intrinsic_align_offset(instr);
    info.swizzle_component_size = ctx->program->gfx_level <= GFX8 ? 4 : 0;
@@ -7698,7 +7695,7 @@ void
 emit_uniform_subgroup(isel_context* ctx, nir_intrinsic_instr* instr, Temp src)
 {
    Builder bld(ctx->program, ctx->block);
-   Definition dst(get_ssa_temp(ctx, &instr->dest.ssa));
+   Definition dst(get_ssa_temp(ctx, &instr->def));
    assert(dst.regClass().type() != RegType::vgpr);
    if (src.regClass().type() == RegType::vgpr)
       bld.pseudo(aco_opcode::p_as_uniform, dst, src);
@@ -7774,7 +7771,7 @@ emit_uniform_reduce(isel_context* ctx, nir_intrinsic_instr* instr)
 
    if (op == nir_op_iadd || op == nir_op_ixor || op == nir_op_fadd) {
      Builder bld(ctx->program, ctx->block);
-      Definition dst(get_ssa_temp(ctx, &instr->dest.ssa));
+      Definition dst(get_ssa_temp(ctx, &instr->def));
      unsigned bit_size = instr->src[0].ssa->bit_size;
      if (bit_size > 32)
        return false;
@@ -7795,7 +7792,7 @@ bool
 emit_uniform_scan(isel_context* ctx, nir_intrinsic_instr* instr)
 {
    Builder bld(ctx->program, ctx->block);
-   Definition dst(get_ssa_temp(ctx, &instr->dest.ssa));
+   Definition dst(get_ssa_temp(ctx, &instr->def));
    nir_op op = (nir_op)nir_intrinsic_reduction_op(instr);
    bool inc = instr->intrinsic == nir_intrinsic_inclusive_scan;
 
@@ -8035,7 +8032,7 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
      glsl_interp_mode mode = (glsl_interp_mode)nir_intrinsic_interp_mode(instr);
      Temp bary = get_interp_param(ctx, instr->intrinsic, mode);
      assert(bary.size() == 2);
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
      bld.copy(Definition(dst), bary);
      emit_split_vector(ctx, dst, 2);
      break;
   }
   case nir_intrinsic_load_barycentric_model: {
      Temp model = get_arg(ctx, ctx->args->pull_model);
      assert(model.size() == 3);
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
      bld.copy(Definition(dst), model);
      emit_split_vector(ctx, dst, 3);
      break;
   }
@@ -8055,31 +8052,31 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
      bld.pseudo(aco_opcode::p_split_vector, Definition(pos1), Definition(pos2), offset);
      Temp bary = get_interp_param(ctx, instr->intrinsic,
                                   (glsl_interp_mode)nir_intrinsic_interp_mode(instr));
-      emit_interp_center(ctx, get_ssa_temp(ctx, &instr->dest.ssa), bary, pos1, pos2);
+      emit_interp_center(ctx, get_ssa_temp(ctx, &instr->def), bary, pos1, pos2);
      break;
   }
   case nir_intrinsic_load_front_face: {
-      bld.vopc(aco_opcode::v_cmp_lg_u32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
+      bld.vopc(aco_opcode::v_cmp_lg_u32, Definition(get_ssa_temp(ctx, &instr->def)),
              Operand::zero(), get_arg(ctx, ctx->args->front_face));
      break;
   }
   case nir_intrinsic_load_view_index: {
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
      bld.copy(Definition(dst), Operand(get_arg(ctx, ctx->args->view_index)));
      break;
   }
   case nir_intrinsic_load_frag_coord: {
-      emit_load_frag_coord(ctx, get_ssa_temp(ctx, &instr->dest.ssa), 4);
+      emit_load_frag_coord(ctx, get_ssa_temp(ctx, &instr->def), 4);
      break;
   }
   case nir_intrinsic_load_frag_shading_rate:
-      emit_load_frag_shading_rate(ctx, get_ssa_temp(ctx, &instr->dest.ssa));
+      emit_load_frag_shading_rate(ctx, get_ssa_temp(ctx, &instr->def));
      break;
   case nir_intrinsic_load_sample_pos: {
      Temp posx = get_arg(ctx, ctx->args->frag_pos[0]);
      Temp posy = get_arg(ctx, ctx->args->frag_pos[1]);
      bld.pseudo(
-         aco_opcode::p_create_vector, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
+         aco_opcode::p_create_vector, Definition(get_ssa_temp(ctx, &instr->def)),
        posx.id() ? bld.vop1(aco_opcode::v_fract_f32, bld.def(v1), posx) : Operand::zero(),
        posy.id() ? bld.vop1(aco_opcode::v_fract_f32, bld.def(v1), posy) : Operand::zero());
      break;
@@ -8126,7 +8123,7 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
   case nir_intrinsic_store_scratch: visit_store_scratch(ctx, instr); break;
   case nir_intrinsic_barrier: emit_barrier(ctx, instr); break;
   case nir_intrinsic_load_num_workgroups: {
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
      if (ctx->options->load_grid_size_from_user_sgpr) {
        bld.copy(Definition(dst), get_arg(ctx, ctx->args->num_work_groups));
      } else {
@@ -8140,26 +8137,26 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
      break;
   }
   case nir_intrinsic_load_ray_launch_size: {
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
      bld.copy(Definition(dst), Operand(get_arg(ctx, ctx->args->rt.launch_size)));
      emit_split_vector(ctx, dst, 3);
      break;
   }
   case nir_intrinsic_load_ray_launch_id: {
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
      bld.copy(Definition(dst), Operand(get_arg(ctx, ctx->args->rt.launch_id)));
      emit_split_vector(ctx, dst, 3);
      break;
   }
   case nir_intrinsic_load_ray_launch_size_addr_amd: {
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
      Temp addr = get_arg(ctx, ctx->args->rt.launch_size_addr);
      assert(addr.regClass() == s2);
      bld.copy(Definition(dst), Operand(addr));
      break;
   }
   case nir_intrinsic_load_local_invocation_id: {
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
 
      if (ctx->options->gfx_level >= GFX11) {
        Temp local_ids[3];
@@ -8179,7 +8176,7 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
      break;
   }
   case nir_intrinsic_load_workgroup_id: {
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
      if (ctx->stage.hw == AC_HW_COMPUTE_SHADER) {
        const struct ac_arg* ids = ctx->args->workgroup_ids;
        bld.pseudo(aco_opcode::p_create_vector, Definition(dst),
@@ -8202,18 +8199,18 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
            Temp temp = bld.sop2(aco_opcode::s_mul_i32, bld.def(s1), wave_id,
                                 Operand::c32(ctx->program->wave_size));
-            emit_mbcnt(ctx, get_ssa_temp(ctx, &instr->dest.ssa), Operand(), Operand(temp));
+            emit_mbcnt(ctx, get_ssa_temp(ctx, &instr->def), Operand(), Operand(temp));
        } else {
-            bld.copy(Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
+            bld.copy(Definition(get_ssa_temp(ctx, &instr->def)),
                    get_arg(ctx, ctx->args->vs_rel_patch_id));
        }
        break;
      } else if (ctx->stage.hw == AC_HW_LEGACY_GEOMETRY_SHADER ||
                ctx->stage.hw == AC_HW_NEXT_GEN_GEOMETRY_SHADER) {
-         bld.copy(Definition(get_ssa_temp(ctx, &instr->dest.ssa)), thread_id_in_threadgroup(ctx));
+         bld.copy(Definition(get_ssa_temp(ctx, &instr->def)), thread_id_in_threadgroup(ctx));
        break;
      } else if (ctx->program->workgroup_size <= ctx->program->wave_size) {
-         emit_mbcnt(ctx, get_ssa_temp(ctx, &instr->dest.ssa));
+         emit_mbcnt(ctx, get_ssa_temp(ctx, &instr->def));
        break;
      }
 
@@ -8227,25 +8224,24 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
      * feed that to v_or */
      Temp tg_num = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc),
                             Operand::c32(0xfc0u), get_arg(ctx, ctx->args->tg_size));
-      bld.vop2(aco_opcode::v_or_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), tg_num,
-               id);
+      bld.vop2(aco_opcode::v_or_b32, Definition(get_ssa_temp(ctx, &instr->def)), tg_num, id);
   } else {
      /* Extract the bit field and multiply the result by 32 (left shift by 5), then do the OR */
      Temp tg_num = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc),
                             get_arg(ctx, ctx->args->tg_size), Operand::c32(0x6u | (0x6u << 16)));
-      bld.vop3(aco_opcode::v_lshl_or_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
-               tg_num, Operand::c32(0x5u), id);
+      bld.vop3(aco_opcode::v_lshl_or_b32, Definition(get_ssa_temp(ctx, &instr->def)), tg_num,
+               Operand::c32(0x5u), id);
   }
   break;
 }
 case nir_intrinsic_load_subgroup_invocation: {
-      emit_mbcnt(ctx, get_ssa_temp(ctx, &instr->dest.ssa));
+      emit_mbcnt(ctx, get_ssa_temp(ctx, &instr->def));
      break;
   }
   case nir_intrinsic_ballot: {
      Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
 
      if (instr->src[0].ssa->bit_size == 1) {
        assert(src.regClass() == bld.lm);
@@ -8279,9 +8275,9 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
      if (instr->intrinsic == nir_intrinsic_read_invocation ||
          !nir_src_is_divergent(instr->src[1]))
        tid = bld.as_uniform(tid);
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
 
-      if (instr->dest.ssa.bit_size != 1)
+      if (instr->def.bit_size != 1)
        src = as_vgpr(ctx, src);
 
      if (src.regClass() == v1b || src.regClass() == v2b) {
@@ -8301,11 +8297,11 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
        hi = emit_wqm(bld, emit_bpermute(ctx, bld, tid, hi));
        bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
        emit_split_vector(ctx, dst, 2);
-      } else if (instr->dest.ssa.bit_size == 1 && tid.regClass() == s1) {
+      } else if (instr->def.bit_size == 1 && tid.regClass() == s1) {
        assert(src.regClass() == bld.lm);
        Temp tmp = bld.sopc(Builder::s_bitcmp1, bld.def(s1, scc), src, tid);
        bool_to_vector_condition(ctx, emit_wqm(bld, tmp), dst);
-      } else if (instr->dest.ssa.bit_size == 1 && tid.regClass() == v1) {
+      } else if (instr->def.bit_size == 1 && tid.regClass() == v1) {
        assert(src.regClass() == bld.lm);
        Temp tmp;
        if (ctx->program->gfx_level <= GFX7)
@@ -8325,13 +8321,13 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
      break;
   }
   case nir_intrinsic_load_sample_id: {
-      bld.vop3(aco_opcode::v_bfe_u32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
+      bld.vop3(aco_opcode::v_bfe_u32, Definition(get_ssa_temp(ctx, &instr->def)),
             get_arg(ctx, ctx->args->ancillary), Operand::c32(8u), Operand::c32(4u));
      break;
   }
   case nir_intrinsic_read_first_invocation: {
      Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
      if (src.regClass() == v1b || src.regClass() == v2b || src.regClass() == v1) {
        emit_wqm(bld, bld.vop1(aco_opcode::v_readfirstlane_b32, bld.def(s1), src), dst);
      } else if (src.regClass() == v2) {
@@ -8341,7 +8337,7 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
        hi = emit_wqm(bld, bld.vop1(aco_opcode::v_readfirstlane_b32, bld.def(s1), hi));
        bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
        emit_split_vector(ctx, dst, 2);
-      } else if (instr->dest.ssa.bit_size == 1) {
+      } else if (instr->def.bit_size == 1) {
        assert(src.regClass() == bld.lm);
        Temp tmp = bld.sopc(Builder::s_bitcmp1, bld.def(s1, scc), src,
                            bld.sop1(Builder::s_ff1_i32, bld.def(s1), Operand(exec, bld.lm)));
@@ -8353,7 +8349,7 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
   }
   case nir_intrinsic_vote_all: {
      Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
      assert(src.regClass() == bld.lm);
      assert(dst.regClass() == bld.lm);
 
@@ -8367,7 +8363,7 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
   }
   case nir_intrinsic_vote_any: {
      Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
      assert(src.regClass() == bld.lm);
      assert(dst.regClass() == bld.lm);
 
@@ -8379,7 +8375,7 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
   case nir_intrinsic_inclusive_scan:
   case nir_intrinsic_exclusive_scan: {
      Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
      nir_op op = (nir_op)nir_intrinsic_reduction_op(instr);
      unsigned cluster_size =
        instr->intrinsic == nir_intrinsic_reduce ? nir_intrinsic_cluster_size(instr) : 0;
@@ -8389,13 +8385,13 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
        instr->intrinsic == nir_intrinsic_reduce && nir_intrinsic_include_helpers(instr);
 
      if (!nir_src_is_divergent(instr->src[0]) && cluster_size == ctx->program->wave_size &&
-          instr->dest.ssa.bit_size != 1) {
+          instr->def.bit_size != 1) {
        /* We use divergence analysis to assign the regclass, so check if it's
        * working as expected */
        ASSERTED bool expected_divergent = instr->intrinsic == nir_intrinsic_exclusive_scan;
        if (instr->intrinsic == nir_intrinsic_inclusive_scan)
          expected_divergent = op == nir_op_iadd || op == nir_op_fadd || op == nir_op_ixor;
-         assert(instr->dest.ssa.divergent == expected_divergent);
+         assert(instr->def.divergent == expected_divergent);
 
        if (instr->intrinsic == nir_intrinsic_reduce) {
          if (emit_uniform_reduce(ctx, instr))
@@ -8405,7 +8401,7 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
        }
      }
 
-      if (instr->dest.ssa.bit_size == 1) {
+      if (instr->def.bit_size == 1) {
        if (op == nir_op_imul || op == nir_op_umin || op == nir_op_imin)
          op = nir_op_iand;
        else if (op == nir_op_iadd)
@@ -8456,7 +8452,7 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
   case nir_intrinsic_quad_swizzle_amd: {
      Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
 
-      if (!instr->dest.ssa.divergent) {
+      if (!instr->def.divergent) {
        emit_uniform_subgroup(ctx, instr, src);
        break;
      }
@@ -8464,7 +8460,7 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
      /* Quad broadcast lane. */
      unsigned lane = 0;
      /* Use VALU for the bool instructions that don't have a SALU-only special case. */
-      bool bool_use_valu = instr->dest.ssa.bit_size == 1;
+      bool bool_use_valu = instr->def.bit_size == 1;
 
      uint16_t dpp_ctrl = 0;
 
@@ -8481,14 +8477,14 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
      default: break;
      }
 
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
      Temp tmp(dst);
 
      /* Setup source. */
      if (bool_use_valu)
        src = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), Operand::zero(),
                           Operand::c32(-1), src);
-      else if (instr->dest.ssa.bit_size != 1)
+      else if (instr->def.bit_size != 1)
        src = as_vgpr(ctx, src);
 
      /* Setup temporary destination. */
@@ -8497,7 +8493,7 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
      else if (ctx->program->stage == fragment_fs)
        tmp = bld.tmp(dst.regClass());
 
-      if (instr->dest.ssa.bit_size == 1 && instr->intrinsic == nir_intrinsic_quad_broadcast) {
+      if (instr->def.bit_size == 1 && instr->intrinsic == nir_intrinsic_quad_broadcast) {
        /* Special case for quad broadcast using SALU only. */
        assert(src.regClass() == bld.lm && tmp.regClass() == bld.lm);
 
@@ -8511,8 +8507,8 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
          bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
        src = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), mask_tmp, src);
        bld.sop1(Builder::s_wqm, Definition(tmp), src);
-      } else if (instr->dest.ssa.bit_size <= 32 || bool_use_valu) {
-         unsigned excess_bytes = bool_use_valu ? 0 : 4 - instr->dest.ssa.bit_size / 8;
+      } else if (instr->def.bit_size <= 32 || bool_use_valu) {
+         unsigned excess_bytes = bool_use_valu ? 0 : 4 - instr->def.bit_size / 8;
        Definition def = excess_bytes ? bld.def(v1) : Definition(tmp);
 
        if (ctx->program->gfx_level >= GFX8)
@@ -8523,7 +8519,7 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
        if (excess_bytes)
          bld.pseudo(aco_opcode::p_split_vector, Definition(tmp),
                     bld.def(RegClass::get(tmp.type(), excess_bytes)), def.getTemp());
-      } else if (instr->dest.ssa.bit_size == 64) {
+      } else if (instr->def.bit_size == 64) {
        Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
        bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src);
 
@@ -8553,17 +8549,17 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
   }
   case nir_intrinsic_masked_swizzle_amd: {
      Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
-      if (!instr->dest.ssa.divergent) {
+      if (!instr->def.divergent) {
        emit_uniform_subgroup(ctx, instr, src);
        break;
      }
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
      uint32_t mask = nir_intrinsic_swizzle_mask(instr);
 
-      if (instr->dest.ssa.bit_size != 1)
+      if (instr->def.bit_size != 1)
        src = as_vgpr(ctx, src);
 
-      if (instr->dest.ssa.bit_size == 1) {
+      if (instr->def.bit_size == 1) {
        assert(src.regClass() == bld.lm);
        src = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), Operand::zero(),
                           Operand::c32(-1), src);
@@ -8594,7 +8590,7 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
      Temp src = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
      Temp val = bld.as_uniform(get_ssa_temp(ctx, instr->src[1].ssa));
      Temp lane = bld.as_uniform(get_ssa_temp(ctx, instr->src[2].ssa));
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
      if (dst.regClass() == v1) {
        /* src2 is ignored for writelane. RA assigns the same reg for dst */
        emit_wqm(bld, bld.writelane(bld.def(v1), val, lane, src), dst);
@@ -8615,7 +8611,7 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
   case nir_intrinsic_mbcnt_amd: {
      Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
      Temp add_src = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[1].ssa));
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
      /* Fit 64-bit mask for wave32 */
      src = emit_extract_vector(ctx, src, 0, RegClass(src.type(), bld.lm.size()));
      Temp wqm_tmp = emit_mbcnt(ctx, bld.tmp(v1), Operand(src), Operand(add_src));
@@ -8624,7 +8620,7 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
   }
   case nir_intrinsic_lane_permute_16_amd: {
      Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
      assert(ctx->program->gfx_level >= GFX10);
 
      if (src.regClass() == s1) {
@@ -8642,7 +8638,7 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
   case nir_intrinsic_is_helper_invocation: {
      /* load_helper() after demote() get lowered to is_helper().
      * Otherwise, these two behave the same.
      */
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
      bld.pseudo(aco_opcode::p_is_helper, Definition(dst), Operand(exec, bld.lm));
      ctx->block->kind |= block_kind_needs_lowering;
      ctx->program->needs_exact = true;
@@ -8697,14 +8693,14 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
   }
   case nir_intrinsic_first_invocation: {
      emit_wqm(bld, bld.sop1(Builder::s_ff1_i32, bld.def(s1), Operand(exec, bld.lm)),
-               get_ssa_temp(ctx, &instr->dest.ssa));
+               get_ssa_temp(ctx, &instr->def));
      break;
   }
   case nir_intrinsic_last_invocation: {
      Temp flbit = bld.sop1(Builder::s_flbit_i32, bld.def(s1), Operand(exec, bld.lm));
      Temp last = bld.sop2(aco_opcode::s_sub_i32, bld.def(s1), bld.def(s1, scc),
                           Operand::c32(ctx->program->wave_size - 1u), flbit);
-      emit_wqm(bld, last, get_ssa_temp(ctx, &instr->dest.ssa));
+      emit_wqm(bld, last, get_ssa_temp(ctx, &instr->def));
      break;
   }
   case nir_intrinsic_elect: {
@@ -8713,12 +8709,12 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
      * two p_elect with different exec masks as the same.
      */
      Temp elected = bld.pseudo(aco_opcode::p_elect, bld.def(bld.lm), Operand(exec, bld.lm));
-      emit_wqm(bld, elected, get_ssa_temp(ctx, &instr->dest.ssa));
+      emit_wqm(bld, elected, get_ssa_temp(ctx, &instr->def));
      ctx->block->kind |= block_kind_needs_lowering;
      break;
   }
   case nir_intrinsic_shader_clock: {
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
      if (nir_intrinsic_memory_scope(instr) == SCOPE_SUBGROUP &&
          ctx->options->gfx_level >= GFX10_3) {
        /* "((size - 1) << 11) | register" (SHADER_CYCLES is encoded as register 29) */
@@ -8738,32 +8734,32 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
      break;
   }
   case nir_intrinsic_load_vertex_id_zero_base: {
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
      bld.copy(Definition(dst), get_arg(ctx, ctx->args->vertex_id));
      break;
   }
   case nir_intrinsic_load_first_vertex: {
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
      bld.copy(Definition(dst), get_arg(ctx, ctx->args->base_vertex));
      break;
   }
   case nir_intrinsic_load_base_instance: {
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
      bld.copy(Definition(dst), get_arg(ctx, ctx->args->start_instance));
      break;
   }
   case nir_intrinsic_load_instance_id: {
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
      bld.copy(Definition(dst), get_arg(ctx, ctx->args->instance_id));
      break;
   }
   case nir_intrinsic_load_draw_id: {
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
      bld.copy(Definition(dst), get_arg(ctx, ctx->args->draw_id));
      break;
   }
   case nir_intrinsic_load_invocation_id: {
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
 
      if (ctx->shader->info.stage == MESA_SHADER_GEOMETRY) {
        if (ctx->options->gfx_level >= GFX10)
@@ -8781,7 +8777,7 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
      break;
   }
   case nir_intrinsic_load_primitive_id: {
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
 
      switch (ctx->shader->info.stage) {
      case MESA_SHADER_GEOMETRY:
@@ -8815,7 +8811,7 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
      break;
   }
   case nir_intrinsic_load_gs_wave_id_amd: {
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
      if (ctx->args->merged_wave_info.used)
        bld.pseudo(aco_opcode::p_extract, Definition(dst), bld.def(s1, scc),
                   get_arg(ctx, ctx->args->merged_wave_info), Operand::c32(2u), Operand::c32(8u),
@@ -8828,7 +8824,7 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
   }
   case nir_intrinsic_is_subgroup_invocation_lt_amd: {
      Temp src = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
-      bld.copy(Definition(get_ssa_temp(ctx, &instr->dest.ssa)), lanecount_to_mask(ctx, src));
+      bld.copy(Definition(get_ssa_temp(ctx, &instr->def)), lanecount_to_mask(ctx, src));
      break;
   }
   case nir_intrinsic_gds_atomic_add_amd: {
@@ -8841,7 +8837,7 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
      break;
   }
   case nir_intrinsic_load_sbt_base_amd: {
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
      Temp addr = get_arg(ctx, ctx->args->rt.sbt_descriptors);
      assert(addr.regClass() == s2);
      bld.copy(Definition(dst), Operand(addr));
@@ -8849,13 +8845,12 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
   }
   case nir_intrinsic_bvh64_intersect_ray_amd: visit_bvh64_intersect_ray_amd(ctx, instr); break;
   case nir_intrinsic_load_rt_dynamic_callable_stack_base_amd:
-      bld.copy(Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
+      bld.copy(Definition(get_ssa_temp(ctx, &instr->def)),
               get_arg(ctx, ctx->args->rt.dynamic_callable_stack_base));
      break;
   case nir_intrinsic_load_resume_shader_address_amd: {
-      bld.pseudo(aco_opcode::p_resume_shader_address,
-                 Definition(get_ssa_temp(ctx, &instr->dest.ssa)), bld.def(s1, scc),
-                 Operand::c32(nir_intrinsic_call_idx(instr)));
+      bld.pseudo(aco_opcode::p_resume_shader_address, Definition(get_ssa_temp(ctx, &instr->def)),
+                 bld.def(s1, scc), Operand::c32(nir_intrinsic_call_idx(instr)));
      break;
   }
   case nir_intrinsic_overwrite_vs_arguments_amd: {
@@ -8873,7 +8868,7 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
   case nir_intrinsic_load_scalar_arg_amd:
   case nir_intrinsic_load_vector_arg_amd: {
      assert(nir_intrinsic_base(instr) < ctx->args->arg_count);
-      Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+      Temp dst = get_ssa_temp(ctx, &instr->def);
      Temp src = ctx->arg_temps[nir_intrinsic_base(instr)];
      assert(src.id());
RegType::sgpr @@ -8883,7 +8878,7 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr) break; } case nir_intrinsic_ordered_xfb_counter_add_amd: { - Temp dst = get_ssa_temp(ctx, &instr->dest.ssa); + Temp dst = get_ssa_temp(ctx, &instr->def); Temp ordered_id = get_ssa_temp(ctx, instr->src[0].ssa); Temp counter = get_ssa_temp(ctx, instr->src[1].ssa); @@ -9035,7 +9030,7 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr) break; } case nir_intrinsic_strict_wqm_coord_amd: { - Temp dst = get_ssa_temp(ctx, &instr->dest.ssa); + Temp dst = get_ssa_temp(ctx, &instr->def); Temp src = get_ssa_temp(ctx, instr->src[0].ssa); Temp tmp = bld.tmp(RegClass::get(RegType::vgpr, dst.bytes())); unsigned begin_size = nir_intrinsic_base(instr); @@ -9062,13 +9057,13 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr) break; } case nir_intrinsic_load_lds_ngg_scratch_base_amd: { - Temp dst = get_ssa_temp(ctx, &instr->dest.ssa); + Temp dst = get_ssa_temp(ctx, &instr->def); bld.sop1(aco_opcode::p_load_symbol, Definition(dst), Operand::c32(aco_symbol_lds_ngg_scratch_base)); break; } case nir_intrinsic_load_lds_ngg_gs_out_vertex_base_amd: { - Temp dst = get_ssa_temp(ctx, &instr->dest.ssa); + Temp dst = get_ssa_temp(ctx, &instr->def); bld.sop1(aco_opcode::p_load_symbol, Definition(dst), Operand::c32(aco_symbol_lds_ngg_gs_out_vertex_base)); break; @@ -9338,18 +9333,18 @@ visit_tex(isel_context* ctx, nir_tex_instr* instr) } /* Build tex instruction */ - unsigned dmask = nir_def_components_read(&instr->dest.ssa) & 0xf; + unsigned dmask = nir_def_components_read(&instr->def) & 0xf; if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) dmask = u_bit_consecutive(0, util_last_bit(dmask)); if (instr->is_sparse) dmask = MAX2(dmask, 1) | 0x10; - bool d16 = instr->dest.ssa.bit_size == 16; - Temp dst = get_ssa_temp(ctx, &instr->dest.ssa); + bool d16 = instr->def.bit_size == 16; + Temp dst = get_ssa_temp(ctx, &instr->def); Temp tmp_dst = dst; /* gather4 selects the component by dmask and always returns vec4 (vec5 if sparse) */ if (instr->op == nir_texop_tg4) { - assert(instr->dest.ssa.num_components == (4 + instr->is_sparse)); + assert(instr->def.num_components == (4 + instr->is_sparse)); if (instr->is_shadow) dmask = 1; else @@ -9358,9 +9353,8 @@ visit_tex(isel_context* ctx, nir_tex_instr* instr) tmp_dst = bld.tmp(instr->is_sparse ? v5 : (d16 ? 
v2 : v4)); } else if (instr->op == nir_texop_fragment_mask_fetch_amd) { tmp_dst = bld.tmp(v1); - } else if (util_bitcount(dmask) != instr->dest.ssa.num_components || - dst.type() == RegType::sgpr) { - unsigned bytes = util_bitcount(dmask) * instr->dest.ssa.bit_size / 8; + } else if (util_bitcount(dmask) != instr->def.num_components || dst.type() == RegType::sgpr) { + unsigned bytes = util_bitcount(dmask) * instr->def.bit_size / 8; tmp_dst = bld.tmp(RegClass::get(RegType::vgpr, bytes)); } @@ -9497,7 +9491,7 @@ visit_tex(isel_context* ctx, nir_tex_instr* instr) mubuf->operands[3] = emit_tfe_init(bld, tmp_dst); ctx->block->instructions.emplace_back(std::move(mubuf)); - expand_vector(ctx, tmp_dst, dst, instr->dest.ssa.num_components, dmask); + expand_vector(ctx, tmp_dst, dst, instr->def.num_components, dmask); return; } @@ -9557,7 +9551,7 @@ visit_tex(isel_context* ctx, nir_tex_instr* instr) bld.copy(bld.def(v1), Operand::c32(0x76543210)), tmp_dst, is_not_null); } } else { - expand_vector(ctx, tmp_dst, dst, instr->dest.ssa.num_components, dmask); + expand_vector(ctx, tmp_dst, dst, instr->def.num_components, dmask); } return; } @@ -9741,7 +9735,7 @@ visit_tex(isel_context* ctx, nir_tex_instr* instr) val[3]); } unsigned mask = instr->op == nir_texop_tg4 ? (instr->is_sparse ? 0x1F : 0xF) : dmask; - expand_vector(ctx, tmp_dst, dst, instr->dest.ssa.num_components, mask); + expand_vector(ctx, tmp_dst, dst, instr->def.num_components, mask); } Operand @@ -9763,10 +9757,10 @@ void visit_phi(isel_context* ctx, nir_phi_instr* instr) { aco_ptr phi; - Temp dst = get_ssa_temp(ctx, &instr->dest.ssa); - assert(instr->dest.ssa.bit_size != 1 || dst.regClass() == ctx->program->lane_mask); + Temp dst = get_ssa_temp(ctx, &instr->def); + assert(instr->def.bit_size != 1 || dst.regClass() == ctx->program->lane_mask); - bool logical = !dst.is_linear() || instr->dest.ssa.divergent; + bool logical = !dst.is_linear() || instr->def.divergent; logical |= (ctx->block->kind & block_kind_merge) != 0; aco_opcode opcode = logical ? aco_opcode::p_phi : aco_opcode::p_linear_phi; diff --git a/src/amd/compiler/aco_instruction_selection_setup.cpp b/src/amd/compiler/aco_instruction_selection_setup.cpp index 5cbec2b..a524069 100644 --- a/src/amd/compiler/aco_instruction_selection_setup.cpp +++ b/src/amd/compiler/aco_instruction_selection_setup.cpp @@ -94,7 +94,7 @@ only_used_by_cross_lane_instrs(nir_def* ssa, bool follow_phis = true) return false; nir_phi_instr* phi = nir_instr_as_phi(src->parent_instr); - if (!only_used_by_cross_lane_instrs(&phi->dest.ssa, false)) + if (!only_used_by_cross_lane_instrs(&phi->def, false)) return false; continue; @@ -461,8 +461,8 @@ init_context(isel_context* ctx, nir_shader* shader) if (!nir_intrinsic_infos[intrinsic->intrinsic].has_dest) break; if (intrinsic->intrinsic == nir_intrinsic_strict_wqm_coord_amd) { - regclasses[intrinsic->dest.ssa.index] = - RegClass::get(RegType::vgpr, intrinsic->dest.ssa.num_components * 4 + + regclasses[intrinsic->def.index] = + RegClass::get(RegType::vgpr, intrinsic->def.num_components * 4 + nir_intrinsic_base(intrinsic)) .as_linear(); break; @@ -542,7 +542,7 @@ init_context(isel_context* ctx, nir_shader* shader) * it is beneficial to use a VGPR destination. This is because this allows * to put the s_waitcnt further down, which decreases latency. 
*/ - if (only_used_by_cross_lane_instrs(&intrinsic->dest.ssa)) { + if (only_used_by_cross_lane_instrs(&intrinsic->def)) { type = RegType::vgpr; break; } @@ -560,7 +560,7 @@ init_context(isel_context* ctx, nir_shader* shader) case nir_intrinsic_load_ubo: case nir_intrinsic_load_ssbo: case nir_intrinsic_load_global_amd: - type = intrinsic->dest.ssa.divergent ? RegType::vgpr : RegType::sgpr; + type = intrinsic->def.divergent ? RegType::vgpr : RegType::sgpr; break; case nir_intrinsic_load_view_index: type = ctx->stage == fragment_fs ? RegType::vgpr : RegType::sgpr; @@ -573,22 +573,21 @@ init_context(isel_context* ctx, nir_shader* shader) } break; } - RegClass rc = get_reg_class(ctx, type, intrinsic->dest.ssa.num_components, - intrinsic->dest.ssa.bit_size); - regclasses[intrinsic->dest.ssa.index] = rc; + RegClass rc = + get_reg_class(ctx, type, intrinsic->def.num_components, intrinsic->def.bit_size); + regclasses[intrinsic->def.index] = rc; break; } case nir_instr_type_tex: { nir_tex_instr* tex = nir_instr_as_tex(instr); - RegType type = tex->dest.ssa.divergent ? RegType::vgpr : RegType::sgpr; + RegType type = tex->def.divergent ? RegType::vgpr : RegType::sgpr; if (tex->op == nir_texop_texture_samples) { - assert(!tex->dest.ssa.divergent); + assert(!tex->def.divergent); } - RegClass rc = - get_reg_class(ctx, type, tex->dest.ssa.num_components, tex->dest.ssa.bit_size); - regclasses[tex->dest.ssa.index] = rc; + RegClass rc = get_reg_class(ctx, type, tex->def.num_components, tex->def.bit_size); + regclasses[tex->def.index] = rc; break; } case nir_instr_type_ssa_undef: { @@ -601,11 +600,11 @@ init_context(isel_context* ctx, nir_shader* shader) case nir_instr_type_phi: { nir_phi_instr* phi = nir_instr_as_phi(instr); RegType type = RegType::sgpr; - unsigned num_components = phi->dest.ssa.num_components; - assert((phi->dest.ssa.bit_size != 1 || num_components == 1) && + unsigned num_components = phi->def.num_components; + assert((phi->def.bit_size != 1 || num_components == 1) && "Multiple components not supported on boolean phis."); - if (phi->dest.ssa.divergent) { + if (phi->def.divergent) { type = RegType::vgpr; } else { nir_foreach_phi_src (src, phi) { @@ -614,10 +613,10 @@ init_context(isel_context* ctx, nir_shader* shader) } } - RegClass rc = get_reg_class(ctx, type, num_components, phi->dest.ssa.bit_size); - if (rc != regclasses[phi->dest.ssa.index]) + RegClass rc = get_reg_class(ctx, type, num_components, phi->def.bit_size); + if (rc != regclasses[phi->def.index]) done = false; - regclasses[phi->dest.ssa.index] = rc; + regclasses[phi->def.index] = rc; break; } default: break; diff --git a/src/amd/llvm/ac_nir_to_llvm.c b/src/amd/llvm/ac_nir_to_llvm.c index 49ccc54..cc16a1f 100644 --- a/src/amd/llvm/ac_nir_to_llvm.c +++ b/src/amd/llvm/ac_nir_to_llvm.c @@ -1471,7 +1471,7 @@ static LLVMValueRef build_tex_intrinsic(struct ac_nir_context *ctx, const nir_te assert((!args->tfe || !args->d16) && "unsupported"); if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) { - unsigned mask = nir_def_components_read(&instr->dest.ssa); + unsigned mask = nir_def_components_read(&instr->def); /* Buffers don't support A16. 
*/ if (args->a16) @@ -1479,7 +1479,7 @@ static LLVMValueRef build_tex_intrinsic(struct ac_nir_context *ctx, const nir_te return ac_build_buffer_load_format(&ctx->ac, args->resource, args->coords[0], ctx->ac.i32_0, util_last_bit(mask), 0, true, - instr->dest.ssa.bit_size == 16, + instr->def.bit_size == 16, args->tfe); } @@ -1566,11 +1566,11 @@ static LLVMValueRef visit_load_push_constant(struct ac_nir_context *ctx, nir_int /* Load constant values from user SGPRS when possible, otherwise * fallback to the default path that loads directly from memory. */ - if (LLVMIsConstant(src0) && instr->dest.ssa.bit_size >= 32) { - unsigned count = instr->dest.ssa.num_components; + if (LLVMIsConstant(src0) && instr->def.bit_size >= 32) { + unsigned count = instr->def.num_components; unsigned offset = index; - if (instr->dest.ssa.bit_size == 64) + if (instr->def.bit_size == 64) count *= 2; offset += LLVMConstIntGetZExtValue(src0); @@ -1585,8 +1585,8 @@ static LLVMValueRef visit_load_push_constant(struct ac_nir_context *ctx, nir_int for (unsigned i = 0; i < count; i++) push_constants[i] = ac_get_arg(&ctx->ac, ctx->args->inline_push_consts[arg_index++]); LLVMValueRef res = ac_build_gather_values(&ctx->ac, push_constants, count); - return instr->dest.ssa.bit_size == 64 - ? LLVMBuildBitCast(ctx->ac.builder, res, get_def_type(ctx, &instr->dest.ssa), "") + return instr->def.bit_size == 64 + ? LLVMBuildBitCast(ctx->ac.builder, res, get_def_type(ctx, &instr->def), "") : res; } } @@ -1594,8 +1594,8 @@ static LLVMValueRef visit_load_push_constant(struct ac_nir_context *ctx, nir_int struct ac_llvm_pointer pc = ac_get_ptr_arg(&ctx->ac, ctx->args, ctx->args->push_constants); ptr = LLVMBuildGEP2(ctx->ac.builder, pc.t, pc.v, &addr, 1, ""); - if (instr->dest.ssa.bit_size == 8) { - unsigned load_dwords = instr->dest.ssa.num_components > 1 ? 2 : 1; + if (instr->def.bit_size == 8) { + unsigned load_dwords = instr->def.num_components > 1 ? 
2 : 1; LLVMTypeRef vec_type = LLVMVectorType(ctx->ac.i8, 4 * load_dwords); ptr = ac_cast_ptr(&ctx->ac, ptr, vec_type); LLVMValueRef res = LLVMBuildLoad2(ctx->ac.builder, vec_type, ptr, ""); @@ -1617,13 +1617,13 @@ static LLVMValueRef visit_load_push_constant(struct ac_nir_context *ctx, nir_int res = LLVMBuildTrunc( ctx->ac.builder, res, - LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.num_components * 8), ""); - if (instr->dest.ssa.num_components > 1) + LLVMIntTypeInContext(ctx->ac.context, instr->def.num_components * 8), ""); + if (instr->def.num_components > 1) res = LLVMBuildBitCast(ctx->ac.builder, res, - LLVMVectorType(ctx->ac.i8, instr->dest.ssa.num_components), ""); + LLVMVectorType(ctx->ac.i8, instr->def.num_components), ""); return res; - } else if (instr->dest.ssa.bit_size == 16) { - unsigned load_dwords = instr->dest.ssa.num_components / 2 + 1; + } else if (instr->def.bit_size == 16) { + unsigned load_dwords = instr->def.num_components / 2 + 1; LLVMTypeRef vec_type = LLVMVectorType(ctx->ac.i16, 2 * load_dwords); ptr = ac_cast_ptr(&ctx->ac, ptr, vec_type); LLVMValueRef res = LLVMBuildLoad2(ctx->ac.builder, vec_type, ptr, ""); @@ -1634,17 +1634,17 @@ static LLVMValueRef visit_load_push_constant(struct ac_nir_context *ctx, nir_int ctx->ac.i32_0, ctx->ac.i32_1, LLVMConstInt(ctx->ac.i32, 2, false), LLVMConstInt(ctx->ac.i32, 3, false), LLVMConstInt(ctx->ac.i32, 4, false)}; - LLVMValueRef swizzle_aligned = LLVMConstVector(&mask[0], instr->dest.ssa.num_components); - LLVMValueRef swizzle_unaligned = LLVMConstVector(&mask[1], instr->dest.ssa.num_components); + LLVMValueRef swizzle_aligned = LLVMConstVector(&mask[0], instr->def.num_components); + LLVMValueRef swizzle_unaligned = LLVMConstVector(&mask[1], instr->def.num_components); LLVMValueRef shuffle_aligned = LLVMBuildShuffleVector(ctx->ac.builder, res, res, swizzle_aligned, ""); LLVMValueRef shuffle_unaligned = LLVMBuildShuffleVector(ctx->ac.builder, res, res, swizzle_unaligned, ""); res = LLVMBuildSelect(ctx->ac.builder, cond, shuffle_unaligned, shuffle_aligned, ""); - return LLVMBuildBitCast(ctx->ac.builder, res, get_def_type(ctx, &instr->dest.ssa), ""); + return LLVMBuildBitCast(ctx->ac.builder, res, get_def_type(ctx, &instr->def), ""); } - LLVMTypeRef ptr_type = get_def_type(ctx, &instr->dest.ssa); + LLVMTypeRef ptr_type = get_def_type(ctx, &instr->def); ptr = ac_cast_ptr(&ctx->ac, ptr, ptr_type); return LLVMBuildLoad2(ctx->ac.builder, ptr_type, ptr, ""); @@ -1941,7 +1941,7 @@ static LLVMValueRef visit_load_buffer(struct ac_nir_context *ctx, nir_intrinsic_ struct waterfall_context wctx; LLVMValueRef rsrc_base = enter_waterfall_ssbo(ctx, &wctx, instr, instr->src[0]); - int elem_size_bytes = instr->dest.ssa.bit_size / 8; + int elem_size_bytes = instr->def.bit_size / 8; int num_components = instr->num_components; enum gl_access_qualifier access = ac_get_mem_access_flags(instr); @@ -1950,7 +1950,7 @@ static LLVMValueRef visit_load_buffer(struct ac_nir_context *ctx, nir_intrinsic_ ctx->abi->load_ssbo(ctx->abi, rsrc_base, false, false) : rsrc_base; LLVMValueRef vindex = ctx->ac.i32_0; - LLVMTypeRef def_type = get_def_type(ctx, &instr->dest.ssa); + LLVMTypeRef def_type = get_def_type(ctx, &instr->def); LLVMTypeRef def_elem_type = num_components > 1 ? 
LLVMGetElementType(def_type) : def_type; LLVMValueRef results[4]; @@ -2035,7 +2035,7 @@ static LLVMValueRef get_global_address(struct ac_nir_context *ctx, static LLVMValueRef visit_load_global(struct ac_nir_context *ctx, nir_intrinsic_instr *instr) { - LLVMTypeRef result_type = get_def_type(ctx, &instr->dest.ssa); + LLVMTypeRef result_type = get_def_type(ctx, &instr->def); LLVMValueRef val; LLVMValueRef addr = get_global_address(ctx, instr, result_type); @@ -2119,18 +2119,18 @@ static LLVMValueRef visit_load_ubo_buffer(struct ac_nir_context *ctx, nir_intrin LLVMValueRef offset = get_src(ctx, instr->src[1]); int num_components = instr->num_components; - assert(instr->dest.ssa.bit_size >= 32 && instr->dest.ssa.bit_size % 32 == 0); + assert(instr->def.bit_size >= 32 && instr->def.bit_size % 32 == 0); if (ctx->abi->load_ubo) rsrc = ctx->abi->load_ubo(ctx->abi, rsrc); /* Convert to a 32-bit load. */ - if (instr->dest.ssa.bit_size == 64) + if (instr->def.bit_size == 64) num_components *= 2; ret = ac_build_buffer_load(&ctx->ac, rsrc, num_components, NULL, offset, NULL, ctx->ac.f32, 0, true, true); - ret = LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->dest.ssa), ""); + ret = LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->def), ""); return exit_waterfall(ctx, &wctx, ret); } @@ -2326,8 +2326,8 @@ static LLVMValueRef visit_image_load(struct ac_nir_context *ctx, const nir_intri args.tfe = instr->intrinsic == nir_intrinsic_bindless_image_sparse_load; if (dim == GLSL_SAMPLER_DIM_BUF) { - unsigned num_channels = util_last_bit(nir_def_components_read(&instr->dest.ssa)); - if (instr->dest.ssa.bit_size == 64) + unsigned num_channels = util_last_bit(nir_def_components_read(&instr->def)); + if (instr->def.bit_size == 64) num_channels = num_channels < 4 ? 2 : 4; LLVMValueRef rsrc, vindex; @@ -2338,11 +2338,11 @@ static LLVMValueRef visit_image_load(struct ac_nir_context *ctx, const nir_intri bool can_speculate = access & ACCESS_CAN_REORDER; res = ac_build_buffer_load_format(&ctx->ac, rsrc, vindex, ctx->ac.i32_0, num_channels, args.access, can_speculate, - instr->dest.ssa.bit_size == 16, + instr->def.bit_size == 16, args.tfe); res = ac_build_expand(&ctx->ac, res, num_channels, args.tfe ? 5 : 4); - res = ac_trim_vector(&ctx->ac, res, instr->dest.ssa.num_components); + res = ac_trim_vector(&ctx->ac, res, instr->def.num_components); res = ac_to_integer(&ctx->ac, res); } else if (instr->intrinsic == nir_intrinsic_bindless_image_fragment_mask_load_amd) { assert(ctx->ac.gfx_level < GFX11); @@ -2368,12 +2368,12 @@ static LLVMValueRef visit_image_load(struct ac_nir_context *ctx, const nir_intri args.dmask = 15; args.attributes = access & ACCESS_CAN_REORDER ? 
AC_ATTR_INVARIANT_LOAD : 0; - args.d16 = instr->dest.ssa.bit_size == 16; + args.d16 = instr->def.bit_size == 16; res = ac_build_image_opcode(&ctx->ac, &args); } - if (instr->dest.ssa.bit_size == 64) { + if (instr->def.bit_size == 64) { LLVMValueRef code = NULL; if (args.tfe) { code = ac_llvm_extract_elem(&ctx->ac, res, 4); @@ -2525,7 +2525,7 @@ static LLVMValueRef visit_image_atomic(struct ac_nir_context *ctx, const nir_int params[param_count++] = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]), ctx->ac.i32_0, ""); /* vindex */ params[param_count++] = ctx->ac.i32_0; /* voffset */ - if (cmpswap && instr->dest.ssa.bit_size == 64) { + if (cmpswap && instr->def.bit_size == 64) { result = emit_ssbo_comp_swap_64(ctx, params[2], params[3], params[1], params[0], true); } else { LLVMTypeRef data_type = LLVMTypeOf(params[0]); @@ -2637,7 +2637,7 @@ static LLVMValueRef visit_load_shared(struct ac_nir_context *ctx, const nir_intr LLVMValueRef values[16], derived_ptr, index, ret; unsigned const_off = nir_intrinsic_base(instr); - LLVMTypeRef elem_type = LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size); + LLVMTypeRef elem_type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size); LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[0], const_off); for (int chan = 0; chan < instr->num_components; chan++) { @@ -2648,7 +2648,7 @@ static LLVMValueRef visit_load_shared(struct ac_nir_context *ctx, const nir_intr ret = ac_build_gather_values(&ctx->ac, values, instr->num_components); - return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->dest.ssa), ""); + return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->def), ""); } static void visit_store_shared(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr) @@ -2676,7 +2676,7 @@ static void visit_store_shared(struct ac_nir_context *ctx, const nir_intrinsic_i static LLVMValueRef visit_load_shared2_amd(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr) { - LLVMTypeRef pointee_type = LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size); + LLVMTypeRef pointee_type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size); LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[0], 0); LLVMValueRef values[2]; @@ -2689,7 +2689,7 @@ static LLVMValueRef visit_load_shared2_amd(struct ac_nir_context *ctx, } LLVMValueRef ret = ac_build_gather_values(&ctx->ac, values, 2); - return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->dest.ssa), ""); + return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->def), ""); } static void visit_store_shared2_amd(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr) @@ -2905,17 +2905,17 @@ static LLVMValueRef visit_load(struct ac_nir_context *ctx, nir_intrinsic_instr * bool is_output) { LLVMValueRef values[8]; - LLVMTypeRef dest_type = get_def_type(ctx, &instr->dest.ssa); + LLVMTypeRef dest_type = get_def_type(ctx, &instr->def); LLVMTypeRef component_type; unsigned base = nir_intrinsic_base(instr); unsigned component = nir_intrinsic_component(instr); - unsigned count = instr->dest.ssa.num_components; + unsigned count = instr->def.num_components; nir_src *vertex_index_src = nir_get_io_arrayed_index_src(instr); LLVMValueRef vertex_index = vertex_index_src ? 
get_src(ctx, *vertex_index_src) : NULL; nir_src offset = *nir_get_io_offset_src(instr); LLVMValueRef indir_index = NULL; - switch (instr->dest.ssa.bit_size) { + switch (instr->def.bit_size) { case 16: case 32: break; @@ -2945,7 +2945,7 @@ static LLVMValueRef visit_load(struct ac_nir_context *ctx, nir_intrinsic_instr * vertex_index, indir_index, base, component, count, !is_output); - if (instr->dest.ssa.bit_size == 16) { + if (instr->def.bit_size == 16) { result = ac_to_integer(&ctx->ac, result); result = LLVMBuildTrunc(ctx->ac.builder, result, dest_type, ""); } @@ -2981,12 +2981,12 @@ static LLVMValueRef visit_load(struct ac_nir_context *ctx, nir_intrinsic_instr * values[chan] = ac_build_fs_interp_mov(&ctx->ac, vertex_id, llvm_chan, attr_number, ac_get_arg(&ctx->ac, ctx->args->prim_mask)); values[chan] = LLVMBuildBitCast(ctx->ac.builder, values[chan], ctx->ac.i32, ""); - if (instr->dest.ssa.bit_size == 16 && + if (instr->def.bit_size == 16 && nir_intrinsic_io_semantics(instr).high_16bits) values[chan] = LLVMBuildLShr(ctx->ac.builder, values[chan], LLVMConstInt(ctx->ac.i32, 16, 0), ""); values[chan] = LLVMBuildTruncOrBitCast(ctx->ac.builder, values[chan], - instr->dest.ssa.bit_size == 16 ? ctx->ac.i16 : ctx->ac.i32, ""); + instr->def.bit_size == 16 ? ctx->ac.i16 : ctx->ac.i32, ""); } LLVMValueRef result = ac_build_gather_values(&ctx->ac, values, count); @@ -3035,8 +3035,8 @@ static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *ins switch (instr->intrinsic) { case nir_intrinsic_ballot: result = ac_build_ballot(&ctx->ac, get_src(ctx, instr->src[0])); - if (instr->dest.ssa.bit_size > ctx->ac.wave_size) { - LLVMTypeRef dest_type = LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size); + if (instr->def.bit_size > ctx->ac.wave_size) { + LLVMTypeRef dest_type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size); result = LLVMBuildZExt(ctx->ac.builder, result, dest_type, ""); } break; @@ -3057,7 +3057,7 @@ static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *ins values[i] = ctx->args->workgroup_ids[i].used ? 
ac_get_arg(&ctx->ac, ctx->args->workgroup_ids[i]) : ctx->ac.i32_0; - if (instr->dest.ssa.bit_size == 64) + if (instr->def.bit_size == 64) values[i] = LLVMBuildZExt(ctx->ac.builder, values[i], ctx->ac.i64, ""); } @@ -3156,7 +3156,7 @@ static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *ins result = ac_build_load_invariant(&ctx->ac, ac_get_ptr_arg(&ctx->ac, ctx->args, ctx->args->num_work_groups), ctx->ac.i32_0); } - if (instr->dest.ssa.bit_size == 64) + if (instr->def.bit_size == 64) result = LLVMBuildZExt(ctx->ac.builder, result, LLVMVectorType(ctx->ac.i64, 3), ""); break; case nir_intrinsic_load_local_invocation_index: @@ -3305,7 +3305,7 @@ static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *ins unsigned index = nir_intrinsic_base(instr); unsigned component = nir_intrinsic_component(instr); result = load_interpolated_input(ctx, interp_param, index, component, - instr->dest.ssa.num_components, instr->dest.ssa.bit_size, + instr->def.num_components, instr->def.bit_size, nir_intrinsic_io_semantics(instr).high_16bits); break; } @@ -3418,10 +3418,10 @@ static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *ins case nir_intrinsic_load_scratch: { LLVMValueRef offset = get_src(ctx, instr->src[0]); LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->scratch, offset); - LLVMTypeRef comp_type = LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size); - LLVMTypeRef vec_type = instr->dest.ssa.num_components == 1 + LLVMTypeRef comp_type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size); + LLVMTypeRef vec_type = instr->def.num_components == 1 ? comp_type - : LLVMVectorType(comp_type, instr->dest.ssa.num_components); + : LLVMVectorType(comp_type, instr->def.num_components); result = LLVMBuildLoad2(ctx->ac.builder, vec_type, ptr, ""); break; } @@ -3457,10 +3457,10 @@ static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *ins offset = LLVMBuildSelect(ctx->ac.builder, cond, offset, size, ""); LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->constant_data, offset); - LLVMTypeRef comp_type = LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size); - LLVMTypeRef vec_type = instr->dest.ssa.num_components == 1 + LLVMTypeRef comp_type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size); + LLVMTypeRef vec_type = instr->def.num_components == 1 ? comp_type - : LLVMVectorType(comp_type, instr->dest.ssa.num_components); + : LLVMVectorType(comp_type, instr->def.num_components); result = LLVMBuildLoad2(ctx->ac.builder, vec_type, ptr, ""); break; } @@ -3479,7 +3479,7 @@ static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *ins LLVMValueRef addr_voffset = get_src(ctx, instr->src[src_base + 1]); LLVMValueRef addr_soffset = get_src(ctx, instr->src[src_base + 2]); LLVMValueRef vidx = idxen ? 
get_src(ctx, instr->src[src_base + 3]) : NULL; - unsigned num_components = instr->dest.ssa.num_components; + unsigned num_components = instr->def.num_components; unsigned const_offset = nir_intrinsic_base(instr); bool reorder = nir_intrinsic_can_reorder(instr); enum gl_access_qualifier access = ac_get_mem_access_flags(instr); @@ -3489,10 +3489,10 @@ static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *ins LLVMConstInt(ctx->ac.i32, const_offset, 0), ""); if (instr->intrinsic == nir_intrinsic_load_buffer_amd && uses_format) { - assert(instr->dest.ssa.bit_size == 16 || instr->dest.ssa.bit_size == 32); + assert(instr->def.bit_size == 16 || instr->def.bit_size == 32); result = ac_build_buffer_load_format(&ctx->ac, descriptor, vidx, voffset, num_components, access, reorder, - instr->dest.ssa.bit_size == 16, false); + instr->def.bit_size == 16, false); result = ac_to_integer(&ctx->ac, result); } else if (instr->intrinsic == nir_intrinsic_store_buffer_amd && uses_format) { assert(instr->src[0].ssa->bit_size == 16 || instr->src[0].ssa->bit_size == 32); @@ -3503,9 +3503,9 @@ static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *ins * Workaround by using i32 and casting to the correct type later. */ const unsigned fetch_num_components = - num_components * MAX2(32, instr->dest.ssa.bit_size) / 32; + num_components * MAX2(32, instr->def.bit_size) / 32; LLVMTypeRef channel_type = - LLVMIntTypeInContext(ctx->ac.context, MIN2(32, instr->dest.ssa.bit_size)); + LLVMIntTypeInContext(ctx->ac.context, MIN2(32, instr->def.bit_size)); if (instr->intrinsic == nir_intrinsic_load_buffer_amd) { result = ac_build_buffer_load(&ctx->ac, descriptor, fetch_num_components, vidx, voffset, @@ -3527,9 +3527,9 @@ static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *ins result = ac_trim_vector(&ctx->ac, result, fetch_num_components); /* Cast to larger than 32-bit sized components if needed. */ - if (instr->dest.ssa.bit_size > 32) { + if (instr->def.bit_size > 32) { LLVMTypeRef cast_channel_type = - LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size); + LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size); LLVMTypeRef cast_type = num_components == 1 ? cast_channel_type : LLVMVectorType(cast_channel_type, num_components); @@ -3600,7 +3600,7 @@ static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *ins arg.used = true; result = ac_to_integer(&ctx->ac, ac_get_arg(&ctx->ac, arg)); if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(result)) != 32) - result = LLVMBuildBitCast(ctx->ac.builder, result, get_def_type(ctx, &instr->dest.ssa), ""); + result = LLVMBuildBitCast(ctx->ac.builder, result, get_def_type(ctx, &instr->def), ""); break; } case nir_intrinsic_load_smem_amd: { @@ -3610,7 +3610,7 @@ static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *ins bool is_addr_32bit = nir_src_bit_size(instr->src[0]) == 32; int addr_space = is_addr_32bit ? 
AC_ADDR_SPACE_CONST_32BIT : AC_ADDR_SPACE_CONST; - LLVMTypeRef result_type = get_def_type(ctx, &instr->dest.ssa); + LLVMTypeRef result_type = get_def_type(ctx, &instr->def); LLVMTypeRef byte_ptr_type = LLVMPointerType(ctx->ac.i8, addr_space); LLVMValueRef addr = LLVMBuildIntToPtr(ctx->ac.builder, base, byte_ptr_type, ""); @@ -3785,7 +3785,7 @@ static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *ins return false; } if (result) { - ctx->ssa_defs[instr->dest.ssa.index] = result; + ctx->ssa_defs[instr->def.index] = result; } return true; } @@ -4067,7 +4067,7 @@ static void visit_tex(struct ac_nir_context *ctx, nir_tex_instr *instr) args.sampler = LLVMBuildInsertElement(ctx->ac.builder, args.sampler, dword0, ctx->ac.i32_0, ""); } - args.d16 = instr->dest.ssa.bit_size == 16; + args.d16 = instr->def.bit_size == 16; args.tfe = instr->is_sparse; result = build_tex_intrinsic(ctx, instr, &args); @@ -4090,7 +4090,7 @@ static void visit_tex(struct ac_nir_context *ctx, nir_tex_instr *instr) LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, ""), LLVMConstInt(ctx->ac.i32, 0x76543210, false), ""); } else if (nir_tex_instr_result_size(instr) != 4) - result = ac_trim_vector(&ctx->ac, result, instr->dest.ssa.num_components); + result = ac_trim_vector(&ctx->ac, result, instr->def.num_components); if (instr->is_sparse) result = ac_build_concat(&ctx->ac, result, code); @@ -4102,16 +4102,16 @@ static void visit_tex(struct ac_nir_context *ctx, nir_tex_instr *instr) result = exit_waterfall(ctx, wctx + i, result); } - ctx->ssa_defs[instr->dest.ssa.index] = result; + ctx->ssa_defs[instr->def.index] = result; } } static void visit_phi(struct ac_nir_context *ctx, nir_phi_instr *instr) { - LLVMTypeRef type = get_def_type(ctx, &instr->dest.ssa); + LLVMTypeRef type = get_def_type(ctx, &instr->def); LLVMValueRef result = LLVMBuildPhi(ctx->ac.builder, type, ""); - ctx->ssa_defs[instr->dest.ssa.index] = result; + ctx->ssa_defs[instr->def.index] = result; _mesa_hash_table_insert(ctx->phis, instr, result); } diff --git a/src/amd/vulkan/meta/radv_meta_bufimage.c b/src/amd/vulkan/meta/radv_meta_bufimage.c index fddfac9..12cc651 100644 --- a/src/amd/vulkan/meta/radv_meta_bufimage.c +++ b/src/amd/vulkan/meta/radv_meta_bufimage.c @@ -63,7 +63,7 @@ build_nir_itob_compute_shader(struct radv_device *dev, bool is_3d) nir_def *coord = nir_replicate(&b, tmp, 4); - nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, coord, nir_undef(&b, 1, 32), outval, + nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, coord, nir_undef(&b, 1, 32), outval, nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_BUF); return b.shader; @@ -213,7 +213,7 @@ build_nir_btoi_compute_shader(struct radv_device *dev, bool is_3d) nir_def *img_coord = nir_vec4(&b, nir_channel(&b, coord, 0), nir_channel(&b, coord, 1), is_3d ? 
nir_channel(&b, coord, 2) : nir_undef(&b, 1, 32), nir_undef(&b, 1, 32)); - nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, img_coord, nir_undef(&b, 1, 32), outval, + nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, img_coord, nir_undef(&b, 1, 32), outval, nir_imm_int(&b, 0), .image_dim = dim); return b.shader; @@ -367,7 +367,7 @@ build_nir_btoi_r32g32b32_compute_shader(struct radv_device *dev) nir_def *coord = nir_replicate(&b, local_pos, 4); - nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, coord, nir_undef(&b, 1, 32), + nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, coord, nir_undef(&b, 1, 32), nir_channel(&b, outval, chan), nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_BUF); } @@ -494,8 +494,8 @@ build_nir_itoi_compute_shader(struct radv_device *dev, bool is_3d, int samples) is_3d ? nir_channel(&b, dst_coord, 2) : nir_undef(&b, 1, 32), nir_undef(&b, 1, 32)); for (uint32_t i = 0; i < samples; i++) { - nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, img_coord, nir_imm_int(&b, i), - tex_vals[i], nir_imm_int(&b, 0), .image_dim = dim); + nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, img_coord, nir_imm_int(&b, i), tex_vals[i], + nir_imm_int(&b, 0), .image_dim = dim); } return b.shader; @@ -665,7 +665,7 @@ build_nir_itoi_r32g32b32_compute_shader(struct radv_device *dev) nir_def *dst_coord = nir_replicate(&b, dst_local_pos, 4); - nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, dst_coord, nir_undef(&b, 1, 32), + nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, dst_coord, nir_undef(&b, 1, 32), nir_channel(&b, outval, 0), nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_BUF); } @@ -779,8 +779,8 @@ build_nir_cleari_compute_shader(struct radv_device *dev, bool is_3d, int samples global_id = nir_vec(&b, comps, 4); for (uint32_t i = 0; i < samples; i++) { - nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, global_id, nir_imm_int(&b, i), - clear_val, nir_imm_int(&b, 0), .image_dim = dim); + nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, global_id, nir_imm_int(&b, i), clear_val, + nir_imm_int(&b, 0), .image_dim = dim); } return b.shader; @@ -929,7 +929,7 @@ build_nir_cleari_r32g32b32_compute_shader(struct radv_device *dev) nir_def *coord = nir_replicate(&b, local_pos, 4); - nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, coord, nir_undef(&b, 1, 32), + nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, coord, nir_undef(&b, 1, 32), nir_channel(&b, clear_val, chan), nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_BUF); } diff --git a/src/amd/vulkan/meta/radv_meta_clear.c b/src/amd/vulkan/meta/radv_meta_clear.c index 4e97082..9d0d024 100644 --- a/src/amd/vulkan/meta/radv_meta_clear.c +++ b/src/amd/vulkan/meta/radv_meta_clear.c @@ -929,7 +929,7 @@ build_clear_dcc_comp_to_single_shader(struct radv_device *dev, bool is_msaa) /* Store the clear color values. */ nir_def *sample_id = is_msaa ? 
nir_imm_int(&b, 0) : nir_undef(&b, 1, 32); - nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, coord, sample_id, data, nir_imm_int(&b, 0), + nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, coord, sample_id, data, nir_imm_int(&b, 0), .image_dim = dim, .image_array = true); return b.shader; diff --git a/src/amd/vulkan/meta/radv_meta_dcc_retile.c b/src/amd/vulkan/meta/radv_meta_dcc_retile.c index 4613d2d..9523fe7 100644 --- a/src/amd/vulkan/meta/radv_meta_dcc_retile.c +++ b/src/amd/vulkan/meta/radv_meta_dcc_retile.c @@ -51,8 +51,8 @@ build_dcc_retile_compute_shader(struct radv_device *dev, struct radeon_surf *sur output_dcc->data.descriptor_set = 0; output_dcc->data.binding = 1; - nir_def *input_dcc_ref = &nir_build_deref_var(&b, input_dcc)->dest.ssa; - nir_def *output_dcc_ref = &nir_build_deref_var(&b, output_dcc)->dest.ssa; + nir_def *input_dcc_ref = &nir_build_deref_var(&b, input_dcc)->def; + nir_def *output_dcc_ref = &nir_build_deref_var(&b, output_dcc)->def; nir_def *coord = get_global_ids(&b, 2); nir_def *zero = nir_imm_int(&b, 0); diff --git a/src/amd/vulkan/meta/radv_meta_decompress.c b/src/amd/vulkan/meta/radv_meta_decompress.c index 1a0b51e..c9b1a95 100644 --- a/src/amd/vulkan/meta/radv_meta_decompress.c +++ b/src/amd/vulkan/meta/radv_meta_decompress.c @@ -58,7 +58,7 @@ build_expand_depth_stencil_compute_shader(struct radv_device *dev) nir_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id); - nir_def *data = nir_image_deref_load(&b, 4, 32, &nir_build_deref_var(&b, input_img)->dest.ssa, global_id, + nir_def *data = nir_image_deref_load(&b, 4, 32, &nir_build_deref_var(&b, input_img)->def, global_id, nir_undef(&b, 1, 32), nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D); /* We need a SCOPE_DEVICE memory_scope because ACO will avoid @@ -68,7 +68,7 @@ build_expand_depth_stencil_compute_shader(struct radv_device *dev) nir_barrier(&b, .execution_scope = SCOPE_WORKGROUP, .memory_scope = SCOPE_DEVICE, .memory_semantics = NIR_MEMORY_ACQ_REL, .memory_modes = nir_var_mem_ssbo); - nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, global_id, nir_undef(&b, 1, 32), data, + nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, global_id, nir_undef(&b, 1, 32), data, nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D); return b.shader; } diff --git a/src/amd/vulkan/meta/radv_meta_etc_decode.c b/src/amd/vulkan/meta/radv_meta_etc_decode.c index 4f88869..0048662 100644 --- a/src/amd/vulkan/meta/radv_meta_etc_decode.c +++ b/src/amd/vulkan/meta/radv_meta_etc_decode.c @@ -426,13 +426,13 @@ build_shader(struct radv_device *dev) nir_push_if(&b, is_3d); { - nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img_3d)->dest.ssa, img_coord, nir_undef(&b, 1, 32), - outval, nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_3D); + nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img_3d)->def, img_coord, nir_undef(&b, 1, 32), outval, + nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_3D); } nir_push_else(&b, NULL); { - nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img_2d)->dest.ssa, img_coord, nir_undef(&b, 1, 32), - outval, nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D, .image_array = true); + nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img_2d)->def, img_coord, nir_undef(&b, 1, 32), outval, + nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D, .image_array = true); } nir_pop_if(&b, NULL); return b.shader; diff --git 
a/src/amd/vulkan/meta/radv_meta_fast_clear.c b/src/amd/vulkan/meta/radv_meta_fast_clear.c index 6cd3575..8af0aeb 100644 --- a/src/amd/vulkan/meta/radv_meta_fast_clear.c +++ b/src/amd/vulkan/meta/radv_meta_fast_clear.c @@ -56,7 +56,7 @@ build_dcc_decompress_compute_shader(struct radv_device *dev) nir_def *img_coord = nir_vec4(&b, nir_channel(&b, global_id, 0), nir_channel(&b, global_id, 1), nir_undef(&b, 1, 32), nir_undef(&b, 1, 32)); - nir_def *data = nir_image_deref_load(&b, 4, 32, &nir_build_deref_var(&b, input_img)->dest.ssa, img_coord, + nir_def *data = nir_image_deref_load(&b, 4, 32, &nir_build_deref_var(&b, input_img)->def, img_coord, nir_undef(&b, 1, 32), nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D); /* We need a SCOPE_DEVICE memory_scope because ACO will avoid @@ -66,7 +66,7 @@ build_dcc_decompress_compute_shader(struct radv_device *dev) nir_barrier(&b, .execution_scope = SCOPE_WORKGROUP, .memory_scope = SCOPE_DEVICE, .memory_semantics = NIR_MEMORY_ACQ_REL, .memory_modes = nir_var_mem_ssbo); - nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, img_coord, nir_undef(&b, 1, 32), data, + nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, img_coord, nir_undef(&b, 1, 32), data, nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D); return b.shader; } diff --git a/src/amd/vulkan/meta/radv_meta_fmask_copy.c b/src/amd/vulkan/meta/radv_meta_fmask_copy.c index aadfa92..5d2f2d1 100644 --- a/src/amd/vulkan/meta/radv_meta_fmask_copy.c +++ b/src/amd/vulkan/meta/radv_meta_fmask_copy.c @@ -88,7 +88,7 @@ build_fmask_copy_compute_shader(struct radv_device *dev, int samples) nir_def *outval = nir_build_tex_deref_instr(&b, nir_texop_fragment_fetch_amd, nir_build_deref_var(&b, input_img), NULL, ARRAY_SIZE(frag_fetch_srcs), frag_fetch_srcs); - nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, dst_coord, sample_id, outval, + nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, dst_coord, sample_id, outval, nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_MS); radv_break_on_count(&b, counter, max_sample_index); diff --git a/src/amd/vulkan/meta/radv_meta_fmask_expand.c b/src/amd/vulkan/meta/radv_meta_fmask_expand.c index 8b9a738..fbfd74c 100644 --- a/src/amd/vulkan/meta/radv_meta_fmask_expand.c +++ b/src/amd/vulkan/meta/radv_meta_fmask_expand.c @@ -48,7 +48,7 @@ build_fmask_expand_compute_shader(struct radv_device *device, int samples) output_img->data.access = ACCESS_NON_READABLE; nir_deref_instr *input_img_deref = nir_build_deref_var(&b, input_img); - nir_def *output_img_deref = &nir_build_deref_var(&b, output_img)->dest.ssa; + nir_def *output_img_deref = &nir_build_deref_var(&b, output_img)->def; nir_def *tex_coord = get_global_ids(&b, 3); diff --git a/src/amd/vulkan/meta/radv_meta_resolve_cs.c b/src/amd/vulkan/meta/radv_meta_resolve_cs.c index b670834..a2e2f6d 100644 --- a/src/amd/vulkan/meta/radv_meta_resolve_cs.c +++ b/src/amd/vulkan/meta/radv_meta_resolve_cs.c @@ -81,7 +81,7 @@ build_resolve_compute_shader(struct radv_device *dev, bool is_integer, bool is_s nir_def *img_coord = nir_vec4(&b, nir_channel(&b, dst_coord, 0), nir_channel(&b, dst_coord, 1), nir_undef(&b, 1, 32), nir_undef(&b, 1, 32)); - nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, img_coord, nir_undef(&b, 1, 32), outval, + nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, img_coord, nir_undef(&b, 1, 32), outval, nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D); return b.shader; } @@ -174,7 +174,7 
@@ build_depth_stencil_resolve_compute_shader(struct radv_device *dev, int samples, nir_def *coord = nir_vec4(&b, nir_channel(&b, img_coord, 0), nir_channel(&b, img_coord, 1), nir_channel(&b, img_coord, 2), nir_undef(&b, 1, 32)); - nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, coord, nir_undef(&b, 1, 32), outval, + nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, coord, nir_undef(&b, 1, 32), outval, nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D, .image_array = true); return b.shader; } diff --git a/src/amd/vulkan/nir/radv_nir_apply_pipeline_layout.c b/src/amd/vulkan/nir/radv_nir_apply_pipeline_layout.c index c3f6adc..535ccaf 100644 --- a/src/amd/vulkan/nir/radv_nir_apply_pipeline_layout.c +++ b/src/amd/vulkan/nir/radv_nir_apply_pipeline_layout.c @@ -97,9 +97,9 @@ visit_vulkan_resource_index(nir_builder *b, apply_layout_state *state, nir_intri if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR) { assert(stride == 16); - nir_def_rewrite_uses(&intrin->dest.ssa, nir_pack_64_2x32_split(b, set_ptr, binding_ptr)); + nir_def_rewrite_uses(&intrin->def, nir_pack_64_2x32_split(b, set_ptr, binding_ptr)); } else { - nir_def_rewrite_uses(&intrin->dest.ssa, nir_vec3(b, set_ptr, binding_ptr, nir_imm_int(b, stride))); + nir_def_rewrite_uses(&intrin->def, nir_vec3(b, set_ptr, binding_ptr, nir_imm_int(b, stride))); } nir_instr_remove(&intrin->instr); } @@ -117,7 +117,7 @@ visit_vulkan_resource_reindex(nir_builder *b, apply_layout_state *state, nir_int binding_ptr = nir_iadd_nuw(b, binding_ptr, index); - nir_def_rewrite_uses(&intrin->dest.ssa, nir_pack_64_2x32_split(b, set_ptr, binding_ptr)); + nir_def_rewrite_uses(&intrin->def, nir_pack_64_2x32_split(b, set_ptr, binding_ptr)); } else { assert(desc_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER || desc_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER); @@ -129,7 +129,7 @@ visit_vulkan_resource_reindex(nir_builder *b, apply_layout_state *state, nir_int binding_ptr = nir_iadd_nuw(b, binding_ptr, index); - nir_def_rewrite_uses(&intrin->dest.ssa, nir_vector_insert_imm(b, intrin->src[0].ssa, binding_ptr, 1)); + nir_def_rewrite_uses(&intrin->def, nir_vector_insert_imm(b, intrin->src[0].ssa, binding_ptr, 1)); } nir_instr_remove(&intrin->instr); } @@ -143,9 +143,9 @@ visit_load_vulkan_descriptor(nir_builder *b, apply_layout_state *state, nir_intr nir_unpack_64_2x32_split_y(b, intrin->src[0].ssa))); nir_def *desc = nir_build_load_global(b, 1, 64, addr, .access = ACCESS_NON_WRITEABLE); - nir_def_rewrite_uses(&intrin->dest.ssa, desc); + nir_def_rewrite_uses(&intrin->def, desc); } else { - nir_def_rewrite_uses(&intrin->dest.ssa, nir_vector_insert_imm(b, intrin->src[0].ssa, nir_imm_int(b, 0), 2)); + nir_def_rewrite_uses(&intrin->def, nir_vector_insert_imm(b, intrin->src[0].ssa, nir_imm_int(b, 0), 2)); } nir_instr_remove(&intrin->instr); } @@ -211,7 +211,7 @@ visit_get_ssbo_size(nir_builder *b, apply_layout_state *state, nir_intrinsic_ins size = nir_channel(b, desc, 2); } - nir_def_rewrite_uses(&intrin->dest.ssa, size); + nir_def_rewrite_uses(&intrin->def, size); nir_instr_remove(&intrin->instr); } @@ -358,7 +358,7 @@ update_image_intrinsic(nir_builder *b, apply_layout_state *state, nir_intrinsic_ nir_intrinsic_access(intrin) & ACCESS_NON_UNIFORM, NULL, !is_load); if (intrin->intrinsic == nir_intrinsic_image_deref_descriptor_amd) { - nir_def_rewrite_uses(&intrin->dest.ssa, desc); + nir_def_rewrite_uses(&intrin->def, desc); nir_instr_remove(&intrin->instr); } else { 
nir_rewrite_image_intrinsic(intrin, desc, true); @@ -477,7 +477,7 @@ apply_layout_to_tex(nir_builder *b, apply_layout_state *state, nir_tex_instr *te } if (tex->op == nir_texop_descriptor_amd) { - nir_def_rewrite_uses(&tex->dest.ssa, image); + nir_def_rewrite_uses(&tex->def, image); nir_instr_remove(&tex->instr); return; } diff --git a/src/amd/vulkan/nir/radv_nir_lower_abi.c b/src/amd/vulkan/nir/radv_nir_lower_abi.c index f0cd4de..cace173 100644 --- a/src/amd/vulkan/nir/radv_nir_lower_abi.c +++ b/src/amd/vulkan/nir/radv_nir_lower_abi.c @@ -499,7 +499,7 @@ lower_abi_instr(nir_builder *b, nir_instr *instr, void *state) return false; if (replacement) - nir_def_rewrite_uses(&intrin->dest.ssa, replacement); + nir_def_rewrite_uses(&intrin->def, replacement); nir_instr_remove(instr); nir_instr_free(instr); diff --git a/src/amd/vulkan/nir/radv_nir_lower_fs_barycentric.c b/src/amd/vulkan/nir/radv_nir_lower_fs_barycentric.c index 36c7304..d97c411 100644 --- a/src/amd/vulkan/nir/radv_nir_lower_fs_barycentric.c +++ b/src/amd/vulkan/nir/radv_nir_lower_fs_barycentric.c @@ -250,7 +250,7 @@ lower_load_barycentric_coord(nir_builder *b, lower_fs_barycentric_state *state, } } - nir_def_rewrite_uses(&intrin->dest.ssa, new_dest); + nir_def_rewrite_uses(&intrin->def, new_dest); nir_instr_remove(&intrin->instr); return true; diff --git a/src/amd/vulkan/nir/radv_nir_lower_fs_intrinsics.c b/src/amd/vulkan/nir/radv_nir_lower_fs_intrinsics.c index d992f16..4300f79 100644 --- a/src/amd/vulkan/nir/radv_nir_lower_fs_intrinsics.c +++ b/src/amd/vulkan/nir/radv_nir_lower_fs_intrinsics.c @@ -63,7 +63,7 @@ radv_nir_lower_fs_intrinsics(nir_shader *nir, const struct radv_shader_stage *fs def = sample_coverage; } - nir_def_rewrite_uses(&intrin->dest.ssa, def); + nir_def_rewrite_uses(&intrin->def, def); nir_instr_remove(instr); progress = true; @@ -73,10 +73,10 @@ radv_nir_lower_fs_intrinsics(nir_shader *nir, const struct radv_shader_stage *fs if (!key->adjust_frag_coord_z) continue; - if (!(nir_def_components_read(&intrin->dest.ssa) & (1 << 2))) + if (!(nir_def_components_read(&intrin->def) & (1 << 2))) continue; - nir_def *frag_z = nir_channel(&b, &intrin->dest.ssa, 2); + nir_def *frag_z = nir_channel(&b, &intrin->def, 2); /* adjusted_frag_z = fddx_fine(frag_z) * 0.0625 + frag_z */ nir_def *adjusted_frag_z = nir_fddx_fine(&b, frag_z); @@ -90,8 +90,8 @@ radv_nir_lower_fs_intrinsics(nir_shader *nir, const struct radv_shader_stage *fs nir_def *cond = nir_ieq_imm(&b, x_rate, 1); frag_z = nir_bcsel(&b, cond, adjusted_frag_z, frag_z); - nir_def *new_dest = nir_vector_insert_imm(&b, &intrin->dest.ssa, frag_z, 2); - nir_def_rewrite_uses_after(&intrin->dest.ssa, new_dest, new_dest->parent_instr); + nir_def *new_dest = nir_vector_insert_imm(&b, &intrin->def, frag_z, 2); + nir_def_rewrite_uses_after(&intrin->def, new_dest, new_dest->parent_instr); progress = true; break; @@ -134,7 +134,7 @@ radv_nir_lower_fs_intrinsics(nir_shader *nir, const struct radv_shader_stage *fs } } - nir_def_rewrite_uses(&intrin->dest.ssa, new_dest); + nir_def_rewrite_uses(&intrin->def, new_dest); nir_instr_remove(instr); progress = true; diff --git a/src/amd/vulkan/nir/radv_nir_lower_intrinsics_early.c b/src/amd/vulkan/nir/radv_nir_lower_intrinsics_early.c index 54b6840..6e33689 100644 --- a/src/amd/vulkan/nir/radv_nir_lower_intrinsics_early.c +++ b/src/amd/vulkan/nir/radv_nir_lower_intrinsics_early.c @@ -60,7 +60,7 @@ radv_nir_lower_intrinsics_early(nir_shader *nir, const struct radv_pipeline_key continue; } - nir_def_rewrite_uses(&intrin->dest.ssa, def); + 
nir_def_rewrite_uses(&intrin->def, def); nir_instr_remove(instr); progress = true; diff --git a/src/amd/vulkan/nir/radv_nir_lower_ray_queries.c b/src/amd/vulkan/nir/radv_nir_lower_ray_queries.c index 36a88db..225f49c 100644 --- a/src/amd/vulkan/nir/radv_nir_lower_ray_queries.c +++ b/src/amd/vulkan/nir/radv_nir_lower_ray_queries.c @@ -701,7 +701,7 @@ radv_nir_lower_ray_queries(struct nir_shader *shader, struct radv_device *device } if (new_dest) - nir_def_rewrite_uses(&intrinsic->dest.ssa, new_dest); + nir_def_rewrite_uses(&intrinsic->def, new_dest); nir_instr_remove(instr); nir_instr_free(instr); diff --git a/src/amd/vulkan/nir/radv_nir_lower_view_index.c b/src/amd/vulkan/nir/radv_nir_lower_view_index.c index ce24dd6..235e8c3 100644 --- a/src/amd/vulkan/nir/radv_nir_lower_view_index.c +++ b/src/amd/vulkan/nir/radv_nir_lower_view_index.c @@ -72,7 +72,7 @@ radv_nir_lower_view_index(nir_shader *nir, bool per_primitive) layer->data.per_primitive = per_primitive; b.cursor = nir_before_instr(instr); nir_def *def = nir_load_var(&b, layer); - nir_def_rewrite_uses(&load->dest.ssa, def); + nir_def_rewrite_uses(&load->def, def); /* Update inputs_read to reflect that the pass added a new input. */ nir->info.inputs_read |= VARYING_BIT_LAYER; diff --git a/src/amd/vulkan/nir/radv_nir_lower_viewport_to_zero.c b/src/amd/vulkan/nir/radv_nir_lower_viewport_to_zero.c index 3c2d56a..600363a 100644 --- a/src/amd/vulkan/nir/radv_nir_lower_viewport_to_zero.c +++ b/src/amd/vulkan/nir/radv_nir_lower_viewport_to_zero.c @@ -51,7 +51,7 @@ radv_nir_lower_viewport_to_zero(nir_shader *nir) b.cursor = nir_before_instr(instr); - nir_def_rewrite_uses(&intr->dest.ssa, nir_imm_zero(&b, 1, 32)); + nir_def_rewrite_uses(&intr->def, nir_imm_zero(&b, 1, 32)); progress = true; break; } diff --git a/src/amd/vulkan/nir/radv_nir_lower_vs_inputs.c b/src/amd/vulkan/nir/radv_nir_lower_vs_inputs.c index 5e0ff61..360baf46 100644 --- a/src/amd/vulkan/nir/radv_nir_lower_vs_inputs.c +++ b/src/amd/vulkan/nir/radv_nir_lower_vs_inputs.c @@ -47,8 +47,8 @@ lower_load_vs_input_from_prolog(nir_builder *b, nir_intrinsic_instr *intrin, low const unsigned base_offset = nir_src_as_uint(*offset_src); const unsigned driver_location = base + base_offset - VERT_ATTRIB_GENERIC0; const unsigned component = nir_intrinsic_component(intrin); - const unsigned bit_size = intrin->dest.ssa.bit_size; - const unsigned num_components = intrin->dest.ssa.num_components; + const unsigned bit_size = intrin->def.bit_size; + const unsigned num_components = intrin->def.num_components; /* 64-bit inputs: they occupy twice as many 32-bit components. * 16-bit inputs: they occupy a 32-bit component (not packed). @@ -209,8 +209,8 @@ lower_load_vs_input(nir_builder *b, nir_intrinsic_instr *intrin, lower_vs_inputs const unsigned base = nir_intrinsic_base(intrin); const unsigned base_offset = nir_src_as_uint(*offset_src); const unsigned location = base + base_offset - VERT_ATTRIB_GENERIC0; - const unsigned bit_size = intrin->dest.ssa.bit_size; - const unsigned dest_num_components = intrin->dest.ssa.num_components; + const unsigned bit_size = intrin->def.bit_size; + const unsigned dest_num_components = intrin->def.num_components; /* Convert the component offset to bit_size units. * (Intrinsic component offset is in 32-bit units.) @@ -225,7 +225,7 @@ lower_load_vs_input(nir_builder *b, nir_intrinsic_instr *intrin, lower_vs_inputs /* Bitmask of components in bit_size units * of the current input load that are actually used. 
*/ - const unsigned dest_use_mask = nir_def_components_read(&intrin->dest.ssa) << component; + const unsigned dest_use_mask = nir_def_components_read(&intrin->def) << component; /* If the input is entirely unused, just replace it with undef. * This is just in case we debug this pass without running DCE first. @@ -406,7 +406,7 @@ lower_vs_input_instr(nir_builder *b, nir_instr *instr, void *state) replacement = lower_load_vs_input(b, intrin, s); } - nir_def_rewrite_uses(&intrin->dest.ssa, replacement); + nir_def_rewrite_uses(&intrin->def, replacement); nir_instr_remove(instr); nir_instr_free(instr); diff --git a/src/amd/vulkan/radv_rt_shader.c b/src/amd/vulkan/radv_rt_shader.c index 7542e32..ab8211c 100644 --- a/src/amd/vulkan/radv_rt_shader.c +++ b/src/amd/vulkan/radv_rt_shader.c @@ -62,7 +62,7 @@ lower_rt_derefs(nir_shader *shader) b.cursor = nir_before_instr(&deref->instr); nir_deref_instr *replacement = nir_build_deref_cast(&b, arg_offset, nir_var_function_temp, deref->var->type, 0); - nir_def_rewrite_uses(&deref->dest.ssa, &replacement->dest.ssa); + nir_def_rewrite_uses(&deref->def, &replacement->def); nir_instr_remove(&deref->instr); } } @@ -560,7 +560,7 @@ lower_rt_instructions(nir_shader *shader, struct rt_variables *vars, unsigned ca } if (ret) - nir_def_rewrite_uses(&intr->dest.ssa, ret); + nir_def_rewrite_uses(&intr->def, ret); nir_instr_remove(instr); break; } @@ -600,8 +600,8 @@ lower_hit_attrib_deref(nir_builder *b, nir_instr *instr, void *data) b->cursor = nir_after_instr(instr); if (intrin->intrinsic == nir_intrinsic_load_deref) { - uint32_t num_components = intrin->dest.ssa.num_components; - uint32_t bit_size = intrin->dest.ssa.bit_size; + uint32_t num_components = intrin->def.num_components; + uint32_t bit_size = intrin->def.bit_size; nir_def *components[NIR_MAX_VEC_COMPONENTS]; @@ -626,7 +626,7 @@ lower_hit_attrib_deref(nir_builder *b, nir_instr *instr, void *data) } } - nir_def_rewrite_uses(&intrin->dest.ssa, nir_vec(b, components, num_components)); + nir_def_rewrite_uses(&intrin->def, nir_vec(b, components, num_components)); } else { nir_def *value = intrin->src[1].ssa; uint32_t num_components = value->num_components; @@ -913,12 +913,12 @@ lower_any_hit_for_intersection(nir_shader *any_hit) break; case nir_intrinsic_load_ray_t_max: - nir_def_rewrite_uses(&intrin->dest.ssa, hit_t); + nir_def_rewrite_uses(&intrin->def, hit_t); nir_instr_remove(&intrin->instr); break; case nir_intrinsic_load_ray_hit_kind: - nir_def_rewrite_uses(&intrin->dest.ssa, hit_kind); + nir_def_rewrite_uses(&intrin->def, hit_kind); nir_instr_remove(&intrin->instr); break; @@ -939,8 +939,8 @@ lower_any_hit_for_intersection(nir_shader *any_hit) break; case nir_intrinsic_load_rt_arg_scratch_offset_amd: b->cursor = nir_after_instr(instr); - nir_def *arg_offset = nir_isub(b, &intrin->dest.ssa, scratch_offset); - nir_def_rewrite_uses_after(&intrin->dest.ssa, arg_offset, arg_offset->parent_instr); + nir_def *arg_offset = nir_isub(b, &intrin->def, scratch_offset); + nir_def_rewrite_uses_after(&intrin->def, arg_offset, arg_offset->parent_instr); break; default: @@ -1030,7 +1030,7 @@ nir_lower_intersection_shader(nir_shader *intersection, nir_shader *any_hit) nir_push_if(b, nir_inot(b, nir_load_intersection_opaque_amd(b))); { nir_def *params[] = { - &nir_build_deref_var(b, commit_tmp)->dest.ssa, + &nir_build_deref_var(b, commit_tmp)->def, hit_t, hit_kind, nir_imm_int(b, intersection->scratch_size), @@ -1049,7 +1049,7 @@ nir_lower_intersection_shader(nir_shader *intersection, nir_shader *any_hit) nir_pop_if(b, 
NULL); nir_def *accepted = nir_load_var(b, commit_tmp); - nir_def_rewrite_uses(&intrin->dest.ssa, accepted); + nir_def_rewrite_uses(&intrin->def, accepted); } } nir_metadata_preserve(impl, nir_metadata_none); diff --git a/src/amd/vulkan/radv_shader_info.c b/src/amd/vulkan/radv_shader_info.c index 8da3400..6f718af 100644 --- a/src/amd/vulkan/radv_shader_info.c +++ b/src/amd/vulkan/radv_shader_info.c @@ -40,8 +40,8 @@ gather_intrinsic_load_input_info(const nir_shader *nir, const nir_intrinsic_inst case MESA_SHADER_VERTEX: { unsigned idx = nir_intrinsic_io_semantics(instr).location; unsigned component = nir_intrinsic_component(instr); - unsigned mask = nir_def_components_read(&instr->dest.ssa); - mask = (instr->dest.ssa.bit_size == 64 ? util_widen_mask(mask, 2) : mask) << component; + unsigned mask = nir_def_components_read(&instr->def); + mask = (instr->def.bit_size == 64 ? util_widen_mask(mask, 2) : mask) << component; info->vs.input_usage_mask[idx] |= mask & 0xf; if (mask >> 4) @@ -115,9 +115,9 @@ gather_push_constant_info(const nir_shader *nir, const nir_intrinsic_instr *inst { info->loads_push_constants = true; - if (nir_src_is_const(instr->src[0]) && instr->dest.ssa.bit_size >= 32) { + if (nir_src_is_const(instr->src[0]) && instr->def.bit_size >= 32) { uint32_t start = (nir_intrinsic_base(instr) + nir_src_as_uint(instr->src[0])) / 4u; - uint32_t size = instr->num_components * (instr->dest.ssa.bit_size / 32u); + uint32_t size = instr->num_components * (instr->def.bit_size / 32u); if (start + size <= (MAX_PUSH_CONSTANTS_SIZE / 4u)) { info->inline_push_constant_mask |= u_bit_consecutive64(start, size); @@ -179,7 +179,7 @@ gather_intrinsic_info(const nir_shader *nir, const nir_intrinsic_instr *instr, s break; case nir_intrinsic_load_local_invocation_id: case nir_intrinsic_load_workgroup_id: { - unsigned mask = nir_def_components_read(&instr->dest.ssa); + unsigned mask = nir_def_components_read(&instr->def); while (mask) { unsigned i = u_bit_scan(&mask); @@ -191,10 +191,10 @@ gather_intrinsic_info(const nir_shader *nir, const nir_intrinsic_instr *instr, s break; } case nir_intrinsic_load_frag_coord: - info->ps.reads_frag_coord_mask |= nir_def_components_read(&instr->dest.ssa); + info->ps.reads_frag_coord_mask |= nir_def_components_read(&instr->def); break; case nir_intrinsic_load_sample_pos: - info->ps.reads_sample_pos_mask |= nir_def_components_read(&instr->dest.ssa); + info->ps.reads_sample_pos_mask |= nir_def_components_read(&instr->def); break; case nir_intrinsic_load_push_constant: gather_push_constant_info(nir, instr, info); diff --git a/src/asahi/compiler/agx_compile.c b/src/asahi/compiler/agx_compile.c index 7b8f54a..d662973 100644 --- a/src/asahi/compiler/agx_compile.c +++ b/src/asahi/compiler/agx_compile.c @@ -409,7 +409,7 @@ agx_emit_load_vary(agx_builder *b, agx_index dest, nir_intrinsic_instr *instr) nir_src *offset = nir_get_io_offset_src(instr); assert(nir_src_is_const(*offset) && "no indirects"); - assert(nir_def_components_read(&instr->dest.ssa) == + assert(nir_def_components_read(&instr->def) == nir_component_mask(components) && "iter does not handle write-after-write hazards"); @@ -519,7 +519,7 @@ agx_emit_local_load_pixel(agx_builder *b, agx_index dest, b->shader->did_writeout = true; b->shader->out->reads_tib = true; - unsigned nr_comps = instr->dest.ssa.num_components; + unsigned nr_comps = instr->def.num_components; agx_ld_tile_to(b, dest, agx_src_index(&instr->src[0]), agx_format_for_pipe(nir_intrinsic_format(instr)), BITFIELD_MASK(nr_comps), 
nir_intrinsic_base(instr)); @@ -539,8 +539,8 @@ agx_emit_load(agx_builder *b, agx_index dest, nir_intrinsic_instr *instr) offset = agx_abs(offset); agx_device_load_to(b, dest, addr, offset, fmt, - BITFIELD_MASK(instr->dest.ssa.num_components), shift, 0); - agx_emit_cached_split(b, dest, instr->dest.ssa.num_components); + BITFIELD_MASK(instr->def.num_components), shift, 0); + agx_emit_cached_split(b, dest, instr->def.num_components); } static void @@ -566,7 +566,7 @@ agx_emit_load_preamble(agx_builder *b, agx_index dst, nir_intrinsic_instr *instr) { agx_index srcs[4] = {agx_null()}; - unsigned dim = instr->dest.ssa.num_components; + unsigned dim = instr->def.num_components; assert(dim <= ARRAY_SIZE(srcs) && "shouldn't see larger vectors"); unsigned base = nir_intrinsic_base(instr); @@ -642,8 +642,8 @@ static agx_instr * agx_load_compute_dimension(agx_builder *b, agx_index dst, nir_intrinsic_instr *instr, enum agx_sr base) { - unsigned dim = instr->dest.ssa.num_components; - unsigned size = instr->dest.ssa.bit_size; + unsigned dim = instr->def.num_components; + unsigned size = instr->def.bit_size; assert(size == 16 || size == 32); agx_index srcs[] = { @@ -738,8 +738,8 @@ agx_emit_local_load(agx_builder *b, agx_index dst, nir_intrinsic_instr *instr) agx_index index = agx_zero(); /* TODO: optimize address arithmetic */ assert(base.size == AGX_SIZE_16); - enum agx_format format = format_for_bitsize(instr->dest.ssa.bit_size); - unsigned nr = instr->dest.ssa.num_components; + enum agx_format format = format_for_bitsize(instr->def.bit_size); + unsigned nr = instr->def.num_components; unsigned mask = BITFIELD_MASK(nr); agx_local_load_to(b, dst, base, index, format, mask); @@ -874,7 +874,7 @@ agx_emit_image_load(agx_builder *b, agx_index dst, nir_intrinsic_instr *intr) agx_instr *I = agx_image_load_to( b, tmp, coords, lod, bindless, texture, agx_txf_sampler(b->shader), agx_null(), agx_tex_dim(dim, is_array), lod_mode, 0, 0, false); - I->mask = agx_expand_tex_to(b, &intr->dest.ssa, tmp, true); + I->mask = agx_expand_tex_to(b, &intr->def, tmp, true); return NULL; } @@ -936,7 +936,7 @@ static agx_instr * agx_emit_intrinsic(agx_builder *b, nir_intrinsic_instr *instr) { agx_index dst = nir_intrinsic_infos[instr->intrinsic].has_dest - ? agx_def_index(&instr->dest.ssa) + ? agx_def_index(&instr->def) : agx_null(); gl_shader_stage stage = b->shader->stage; @@ -1663,7 +1663,7 @@ agx_emit_tex(agx_builder *b, nir_tex_instr *instr) } } - agx_index dst = agx_def_index(&instr->dest.ssa); + agx_index dst = agx_def_index(&instr->def); /* Pack shadow reference value (compare) and packed offset together */ agx_index compare_offset = agx_null(); @@ -1690,7 +1690,7 @@ agx_emit_tex(agx_builder *b, nir_tex_instr *instr) * textureGatherOffsets. Don't try to mask the destination for gathers. 
*/ bool masked = (instr->op != nir_texop_tg4); - I->mask = agx_expand_tex_to(b, &instr->dest.ssa, tmp, masked); + I->mask = agx_expand_tex_to(b, &instr->def, tmp, masked); } /* @@ -1754,8 +1754,8 @@ agx_emit_jump(agx_builder *b, nir_jump_instr *instr) static void agx_emit_phi(agx_builder *b, nir_phi_instr *instr) { - agx_instr *I = agx_phi_to(b, agx_def_index(&instr->dest.ssa), - exec_list_length(&instr->srcs)); + agx_instr *I = + agx_phi_to(b, agx_def_index(&instr->def), exec_list_length(&instr->srcs)); /* Deferred */ I->phi = instr; @@ -1776,7 +1776,7 @@ agx_emit_phi_deferred(agx_context *ctx, agx_block *block, agx_instr *I) nir_phi_instr *phi = I->phi; /* Guaranteed by lower_phis_to_scalar */ - assert(phi->dest.ssa.num_components == 1); + assert(phi->def.num_components == 1); nir_foreach_phi_src(src, phi) { agx_block *pred = agx_from_nir_block(ctx, src->pred); @@ -2125,7 +2125,7 @@ agx_lower_front_face(struct nir_builder *b, nir_instr *instr, UNUSED void *data) if (intr->intrinsic != nir_intrinsic_load_front_face) return false; - nir_def *def = &intr->dest.ssa; + nir_def *def = &intr->def; assert(def->bit_size == 1); b->cursor = nir_before_instr(&intr->instr); diff --git a/src/asahi/compiler/agx_compiler.h b/src/asahi/compiler/agx_compiler.h index 65bc35e..688699c 100644 --- a/src/asahi/compiler/agx_compiler.h +++ b/src/asahi/compiler/agx_compiler.h @@ -474,7 +474,7 @@ agx_vec_for_def(agx_context *ctx, nir_def *def) static inline agx_index agx_vec_for_intr(agx_context *ctx, nir_intrinsic_instr *instr) { - return agx_vec_for_def(ctx, &instr->dest.ssa); + return agx_vec_for_def(ctx, &instr->def); } static inline unsigned diff --git a/src/asahi/compiler/agx_nir_lower_address.c b/src/asahi/compiler/agx_nir_lower_address.c index 4916c44..c22ccfb 100644 --- a/src/asahi/compiler/agx_nir_lower_address.c +++ b/src/asahi/compiler/agx_nir_lower_address.c @@ -270,7 +270,7 @@ pass(struct nir_builder *b, nir_instr *instr, UNUSED void *data) unsigned bitsize = intr->intrinsic == nir_intrinsic_store_global ? nir_src_bit_size(intr->src[0]) - : intr->dest.ssa.bit_size; + : intr->def.bit_size; enum pipe_format format = format_for_bitsize(bitsize); unsigned format_shift = util_logbase2(util_format_get_blocksize(format)); @@ -311,8 +311,8 @@ pass(struct nir_builder *b, nir_instr *instr, UNUSED void *data) nir_def *repl = NULL; bool has_dest = (intr->intrinsic != nir_intrinsic_store_global); - unsigned num_components = has_dest ? intr->dest.ssa.num_components : 0; - unsigned bit_size = has_dest ? intr->dest.ssa.bit_size : 0; + unsigned num_components = has_dest ? intr->def.num_components : 0; + unsigned bit_size = has_dest ? 
intr->def.bit_size : 0; if (intr->intrinsic == nir_intrinsic_load_global) { repl = @@ -344,7 +344,7 @@ pass(struct nir_builder *b, nir_instr *instr, UNUSED void *data) } if (repl) - nir_def_rewrite_uses(&intr->dest.ssa, repl); + nir_def_rewrite_uses(&intr->def, repl); nir_instr_remove(instr); return true; diff --git a/src/asahi/compiler/agx_nir_lower_interpolation.c b/src/asahi/compiler/agx_nir_lower_interpolation.c index 6000b70..274d5ef 100644 --- a/src/asahi/compiler/agx_nir_lower_interpolation.c +++ b/src/asahi/compiler/agx_nir_lower_interpolation.c @@ -127,7 +127,7 @@ interpolate_channel(nir_builder *b, nir_intrinsic_instr *load, unsigned channel) .interp_mode = interp_mode_for_load(load), .io_semantics = sem); if (load->intrinsic == nir_intrinsic_load_input) { - assert(load->dest.ssa.bit_size == 32); + assert(load->def.bit_size == 32); return interpolate_flat(b, coefficients); } else { nir_intrinsic_instr *bary = nir_src_as_intrinsic(load->src[0]); @@ -136,7 +136,7 @@ interpolate_channel(nir_builder *b, nir_intrinsic_instr *load, unsigned channel) b, coefficients, bary->src[0].ssa, nir_intrinsic_interp_mode(bary) != INTERP_MODE_NOPERSPECTIVE); - return nir_f2fN(b, interp, load->dest.ssa.bit_size); + return nir_f2fN(b, interp, load->def.bit_size); } } @@ -147,11 +147,11 @@ lower(nir_builder *b, nir_instr *instr, void *data) /* Each component is loaded separately */ nir_def *values[NIR_MAX_VEC_COMPONENTS] = {NULL}; - for (unsigned i = 0; i < intr->dest.ssa.num_components; ++i) { + for (unsigned i = 0; i < intr->def.num_components; ++i) { values[i] = interpolate_channel(b, intr, i); } - return nir_vec(b, values, intr->dest.ssa.num_components); + return nir_vec(b, values, intr->def.num_components); } bool diff --git a/src/asahi/compiler/agx_nir_lower_load_mask.c b/src/asahi/compiler/agx_nir_lower_load_mask.c index 572f942..ec858e8 100644 --- a/src/asahi/compiler/agx_nir_lower_load_mask.c +++ b/src/asahi/compiler/agx_nir_lower_load_mask.c @@ -21,12 +21,12 @@ pass(struct nir_builder *b, nir_instr *instr, UNUSED void *data) if (intr->intrinsic != nir_intrinsic_load_interpolated_input) return false; - unsigned mask = nir_def_components_read(&intr->dest.ssa); + unsigned mask = nir_def_components_read(&intr->def); if (mask == 0 || mask == nir_component_mask(intr->num_components)) return false; b->cursor = nir_before_instr(instr); - unsigned bit_size = intr->dest.ssa.bit_size; + unsigned bit_size = intr->def.bit_size; nir_def *comps[4] = {NULL}; for (unsigned c = 0; c < intr->num_components; ++c) { @@ -43,8 +43,8 @@ pass(struct nir_builder *b, nir_instr *instr, UNUSED void *data) nir_intrinsic_instr *clone_intr = nir_instr_as_intrinsic(clone); /* Shrink the load to count contiguous components */ - nir_def_init(clone, &clone_intr->dest.ssa, count, bit_size); - nir_def *clone_vec = &clone_intr->dest.ssa; + nir_def_init(clone, &clone_intr->def, count, bit_size); + nir_def *clone_vec = &clone_intr->def; clone_intr->num_components = count; /* The load starts from component c relative to the original load */ @@ -68,8 +68,7 @@ pass(struct nir_builder *b, nir_instr *instr, UNUSED void *data) } } - nir_def_rewrite_uses(&intr->dest.ssa, - nir_vec(b, comps, intr->num_components)); + nir_def_rewrite_uses(&intr->def, nir_vec(b, comps, intr->num_components)); return true; } diff --git a/src/asahi/compiler/agx_nir_lower_texture.c b/src/asahi/compiler/agx_nir_lower_texture.c index 338568c..3784406 100644 --- a/src/asahi/compiler/agx_nir_lower_texture.c +++ b/src/asahi/compiler/agx_nir_lower_texture.c @@ -140,7
+140,7 @@ agx_txs(nir_builder *b, nir_tex_instr *tex) height = depth; /* How we finish depends on the size of the result */ - unsigned nr_comps = tex->dest.ssa.num_components; + unsigned nr_comps = tex->def.num_components; assert(nr_comps <= 3); /* Adjust for LOD, do not adjust array size */ @@ -179,7 +179,7 @@ lower_txs(nir_builder *b, nir_instr *instr, UNUSED void *data) return false; nir_def *res = agx_txs(b, tex); - nir_def_rewrite_uses_after(&tex->dest.ssa, res, instr); + nir_def_rewrite_uses_after(&tex->def, res, instr); nir_instr_remove(instr); return true; } @@ -206,7 +206,7 @@ load_rgb32(nir_builder *b, nir_tex_instr *tex, nir_def *coordinate) nir_iand_imm(b, nir_ushr_imm(b, desc_hi, 2), BITFIELD64_MASK(36)); nir_def *base = nir_ishl_imm(b, base_shr4, 4); - nir_def *raw = nir_load_constant_agx(b, 3, tex->dest.ssa.bit_size, base, + nir_def *raw = nir_load_constant_agx(b, 3, tex->def.bit_size, base, nir_imul_imm(b, coordinate, 3), .format = AGX_INTERNAL_FORMAT_I32); @@ -277,11 +277,11 @@ lower_buffer_texture(nir_builder *b, nir_tex_instr *tex) nir_pop_if(b, nif); /* Put it together with a phi */ - nir_def *phi = nir_if_phi(b, rgb32, &tex->dest.ssa); - nir_def_rewrite_uses(&tex->dest.ssa, phi); + nir_def *phi = nir_if_phi(b, rgb32, &tex->def); + nir_def_rewrite_uses(&tex->def, phi); nir_phi_instr *phi_instr = nir_instr_as_phi(phi->parent_instr); nir_phi_src *else_src = nir_phi_get_src_from_block(phi_instr, else_block); - nir_instr_rewrite_src_ssa(phi->parent_instr, &else_src->src, &tex->dest.ssa); + nir_instr_rewrite_src_ssa(phi->parent_instr, &else_src->src, &tex->def); return true; } @@ -419,8 +419,8 @@ bias_for_tex(nir_builder *b, nir_tex_instr *tex) query->op = nir_texop_lod_bias_agx; query->dest_type = nir_type_float16; - nir_def_init(instr, &query->dest.ssa, 1, 16); - return &query->dest.ssa; + nir_def_init(instr, &query->def, 1, 16); + return &query->def; } static bool @@ -548,9 +548,9 @@ txs_for_image(nir_builder *b, nir_intrinsic_instr *intr, nir_tex_src_for_ssa(nir_tex_src_texture_offset, intr->src[0].ssa); } - nir_def_init(&tex->instr, &tex->dest.ssa, num_components, bit_size); + nir_def_init(&tex->instr, &tex->def, num_components, bit_size); nir_builder_instr_insert(b, &tex->instr); - return &tex->dest.ssa; + return &tex->def; } static nir_def * @@ -742,15 +742,14 @@ lower_images(nir_builder *b, nir_instr *instr, UNUSED void *data) case nir_intrinsic_image_size: case nir_intrinsic_bindless_image_size: - nir_def_rewrite_uses(&intr->dest.ssa, - txs_for_image(b, intr, intr->dest.ssa.num_components, - intr->dest.ssa.bit_size)); + nir_def_rewrite_uses( + &intr->def, + txs_for_image(b, intr, intr->def.num_components, intr->def.bit_size)); return true; case nir_intrinsic_image_texel_address: case nir_intrinsic_bindless_image_texel_address: - nir_def_rewrite_uses(&intr->dest.ssa, - image_texel_address(b, intr, false)); + nir_def_rewrite_uses(&intr->def, image_texel_address(b, intr, false)); return true; default: diff --git a/src/asahi/compiler/agx_nir_lower_ubo.c b/src/asahi/compiler/agx_nir_lower_ubo.c index ded86c8..cc1182a 100644 --- a/src/asahi/compiler/agx_nir_lower_ubo.c +++ b/src/asahi/compiler/agx_nir_lower_ubo.c @@ -26,9 +26,9 @@ pass(struct nir_builder *b, nir_instr *instr, UNUSED void *data) nir_iadd(b, nir_load_ubo_base_agx(b, ubo_index), nir_u2u64(b, offset)); nir_def *value = nir_load_global_constant(b, address, nir_intrinsic_align(intr), - intr->num_components, intr->dest.ssa.bit_size); + intr->num_components, intr->def.bit_size); - 
nir_def_rewrite_uses(&intr->dest.ssa, value); + nir_def_rewrite_uses(&intr->def, value); return true; } diff --git a/src/asahi/lib/agx_meta.c b/src/asahi/lib/agx_meta.c index d65c48f..cbc2a42 100644 --- a/src/asahi/lib/agx_meta.c +++ b/src/asahi/lib/agx_meta.c @@ -57,10 +57,10 @@ build_background_op(nir_builder *b, enum agx_meta_op op, unsigned rt, tex->coord_components = 2; tex->texture_index = rt; - nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32); + nir_def_init(&tex->instr, &tex->def, 4, 32); nir_builder_instr_insert(b, &tex->instr); - return nir_trim_vector(b, &tex->dest.ssa, nr); + return nir_trim_vector(b, &tex->def, nr); } else { assert(op == AGX_META_OP_CLEAR); diff --git a/src/asahi/lib/agx_nir_lower_msaa.c b/src/asahi/lib/agx_nir_lower_msaa.c index aba8404..f189607 100644 --- a/src/asahi/lib/agx_nir_lower_msaa.c +++ b/src/asahi/lib/agx_nir_lower_msaa.c @@ -20,8 +20,8 @@ lower_wrapped(nir_builder *b, nir_instr *instr, void *data) switch (intr->intrinsic) { case nir_intrinsic_load_sample_id: { - unsigned size = intr->dest.ssa.bit_size; - nir_def_rewrite_uses(&intr->dest.ssa, nir_u2uN(b, sample_id, size)); + unsigned size = intr->def.bit_size; + nir_def_rewrite_uses(&intr->def, nir_u2uN(b, sample_id, size)); nir_instr_remove(instr); return true; } @@ -151,7 +151,7 @@ lower_sample_mask_read(nir_builder *b, nir_instr *instr, UNUSED void *_) if (intr->intrinsic != nir_intrinsic_load_sample_mask_in) return false; - nir_def *old = &intr->dest.ssa; + nir_def *old = &intr->def; nir_def *lowered = nir_iand( b, old, nir_u2uN(b, nir_load_api_sample_mask_agx(b), old->bit_size)); diff --git a/src/asahi/lib/agx_nir_lower_sample_intrinsics.c b/src/asahi/lib/agx_nir_lower_sample_intrinsics.c index 154f80f..d306f79 100644 --- a/src/asahi/lib/agx_nir_lower_sample_intrinsics.c +++ b/src/asahi/lib/agx_nir_lower_sample_intrinsics.c @@ -52,11 +52,11 @@ lower_to_sample(nir_builder *b, nir_instr *instr, void *_) xy[i] = nir_fmul_imm(b, nir_u2f16(b, nibble), 1.0 / 16.0); /* Upconvert if necessary */ - xy[i] = nir_f2fN(b, xy[i], intr->dest.ssa.bit_size); + xy[i] = nir_f2fN(b, xy[i], intr->def.bit_size); } /* Collect and rewrite */ - nir_def_rewrite_uses(&intr->dest.ssa, nir_vec2(b, xy[0], xy[1])); + nir_def_rewrite_uses(&intr->def, nir_vec2(b, xy[0], xy[1])); nir_instr_remove(instr); return true; } @@ -67,7 +67,7 @@ lower_to_sample(nir_builder *b, nir_instr *instr, void *_) * by the sample ID to make that happen. 
*/ b->cursor = nir_after_instr(instr); - nir_def *old = &intr->dest.ssa; + nir_def *old = &intr->def; nir_def *lowered = mask_by_sample_id(b, old); nir_def_rewrite_uses_after(old, lowered, lowered->parent_instr); return true; @@ -78,10 +78,10 @@ lower_to_sample(nir_builder *b, nir_instr *instr, void *_) * interpolateAtSample() with the sample ID */ b->cursor = nir_after_instr(instr); - nir_def *old = &intr->dest.ssa; + nir_def *old = &intr->def; nir_def *lowered = nir_load_barycentric_at_sample( - b, intr->dest.ssa.bit_size, nir_load_sample_id(b), + b, intr->def.bit_size, nir_load_sample_id(b), .interp_mode = nir_intrinsic_interp_mode(intr)); nir_def_rewrite_uses_after(old, lowered, lowered->parent_instr); diff --git a/src/asahi/lib/agx_nir_lower_tilebuffer.c b/src/asahi/lib/agx_nir_lower_tilebuffer.c index 7b40c0a..573bcd6 100644 --- a/src/asahi/lib/agx_nir_lower_tilebuffer.c +++ b/src/asahi/lib/agx_nir_lower_tilebuffer.c @@ -266,7 +266,7 @@ tib_impl(nir_builder *b, nir_instr *instr, void *data) return NIR_LOWER_INSTR_PROGRESS_REPLACE; } else { - uint8_t bit_size = intr->dest.ssa.bit_size; + uint8_t bit_size = intr->def.bit_size; /* Loads from non-existent render targets are undefined in NIR but not * possible to encode in the hardware, delete them. diff --git a/src/asahi/lib/agx_nir_lower_vbo.c b/src/asahi/lib/agx_nir_lower_vbo.c index 3e80252..168698a 100644 --- a/src/asahi/lib/agx_nir_lower_vbo.c +++ b/src/asahi/lib/agx_nir_lower_vbo.c @@ -149,7 +149,7 @@ pass(struct nir_builder *b, nir_instr *instr, void *data) util_format_is_pure_uint(interchange_format) && !util_format_is_pure_uint(attrib.format) ? (interchange_align * 8) - : intr->dest.ssa.bit_size; + : intr->def.bit_size; /* Non-UNORM R10G10B10A2 loaded as a scalar and unpacked */ if (interchange_format == PIPE_FORMAT_R32_UINT && !desc->is_array) @@ -190,7 +190,7 @@ pass(struct nir_builder *b, nir_instr *instr, void *data) b, interchange_comps, interchange_register_size, base, stride_offset_el, .format = interchange_format, .base = shift); - unsigned dest_size = intr->dest.ssa.bit_size; + unsigned dest_size = intr->def.bit_size; /* Unpack but do not convert non-native non-array formats */ if (is_rgb10_a2(desc) && interchange_format == PIPE_FORMAT_R32_UINT) { @@ -246,7 +246,7 @@ pass(struct nir_builder *b, nir_instr *instr, void *data) channels[i] = apply_swizzle_channel(b, memory, desc->swizzle[i], is_int); nir_def *logical = nir_vec(b, channels, intr->num_components); - nir_def_rewrite_uses(&intr->dest.ssa, logical); + nir_def_rewrite_uses(&intr->def, logical); return true; } diff --git a/src/broadcom/compiler/nir_to_vir.c b/src/broadcom/compiler/nir_to_vir.c index 92bb251..4120b3d 100644 --- a/src/broadcom/compiler/nir_to_vir.c +++ b/src/broadcom/compiler/nir_to_vir.c @@ -627,7 +627,7 @@ ntq_emit_tmu_general(struct v3d_compile *c, nir_intrinsic_instr *instr, tmu_op, has_index, &tmu_writes); } else if (is_load) { - type_size = instr->dest.ssa.bit_size / 8; + type_size = instr->def.bit_size / 8; } /* For atomics we use 32bit except for CMPXCHG, that we need @@ -703,7 +703,7 @@ ntq_emit_tmu_general(struct v3d_compile *c, nir_intrinsic_instr *instr, */ const uint32_t component_mask = (1 << dest_components) - 1; - ntq_add_pending_tmu_flush(c, &instr->dest.ssa, + ntq_add_pending_tmu_flush(c, &instr->def, component_mask); } } @@ -934,7 +934,7 @@ ntq_emit_txs(struct v3d_compile *c, nir_tex_instr *instr) unreachable("Bad sampler type"); } - ntq_store_def(c, &instr->dest.ssa, i, size); + ntq_store_def(c, &instr->def, i, size); } } @@ 
-949,11 +949,11 @@ ntq_emit_tex(struct v3d_compile *c, nir_tex_instr *instr) */ switch (instr->op) { case nir_texop_query_levels: - ntq_store_def(c, &instr->dest.ssa, 0, + ntq_store_def(c, &instr->def, 0, vir_uniform(c, QUNIFORM_TEXTURE_LEVELS, unit)); return; case nir_texop_texture_samples: - ntq_store_def(c, &instr->dest.ssa, 0, + ntq_store_def(c, &instr->def, 0, vir_uniform(c, QUNIFORM_TEXTURE_SAMPLES, unit)); return; case nir_texop_txs: @@ -2471,7 +2471,7 @@ ntq_setup_registers(struct v3d_compile *c, nir_function_impl *impl) struct qreg *qregs = ralloc_array(c->def_ht, struct qreg, array_len * num_components); - nir_def *nir_reg = &decl->dest.ssa; + nir_def *nir_reg = &decl->def; _mesa_hash_table_insert(c->def_ht, nir_reg, qregs); for (int i = 0; i < array_len * num_components; i++) @@ -2501,10 +2501,10 @@ ntq_emit_image_size(struct v3d_compile *c, nir_intrinsic_instr *instr) assert(nir_src_as_uint(instr->src[1]) == 0); - ntq_store_def(c, &instr->dest.ssa, 0, + ntq_store_def(c, &instr->def, 0, vir_uniform(c, QUNIFORM_IMAGE_WIDTH, image_index)); if (instr->num_components > 1) { - ntq_store_def(c, &instr->dest.ssa, 1, + ntq_store_def(c, &instr->def, 1, vir_uniform(c, instr->num_components == 2 && is_array ? QUNIFORM_IMAGE_ARRAY_SIZE : @@ -2512,7 +2512,7 @@ ntq_emit_image_size(struct v3d_compile *c, nir_intrinsic_instr *instr) image_index)); } if (instr->num_components > 2) { - ntq_store_def(c, &instr->dest.ssa, 2, + ntq_store_def(c, &instr->def, 2, vir_uniform(c, is_array ? QUNIFORM_IMAGE_ARRAY_SIZE : @@ -2650,7 +2650,7 @@ vir_emit_tlb_color_read(struct v3d_compile *c, nir_intrinsic_instr *instr) } assert(color_reads_for_sample[component].file != QFILE_NULL); - ntq_store_def(c, &instr->dest.ssa, 0, + ntq_store_def(c, &instr->def, 0, vir_MOV(c, color_reads_for_sample[component])); } @@ -2694,7 +2694,7 @@ static void ntq_emit_load_uniform(struct v3d_compile *c, nir_intrinsic_instr *instr) { /* We scalarize general TMU access for anything that is not 32-bit. 
*/ - assert(instr->dest.ssa.bit_size == 32 || + assert(instr->def.bit_size == 32 || instr->num_components == 1); /* Try to emit ldunif if possible, otherwise fallback to general TMU */ @@ -2703,7 +2703,7 @@ ntq_emit_load_uniform(struct v3d_compile *c, nir_intrinsic_instr *instr) nir_src_as_uint(instr->src[0])); if (try_emit_uniform(c, offset, instr->num_components, - &instr->dest.ssa, QUNIFORM_UNIFORM)) { + &instr->def, QUNIFORM_UNIFORM)) { return; } } @@ -2726,13 +2726,13 @@ ntq_emit_inline_ubo_load(struct v3d_compile *c, nir_intrinsic_instr *instr) return false; /* We scalarize general TMU access for anything that is not 32-bit */ - assert(instr->dest.ssa.bit_size == 32 || + assert(instr->def.bit_size == 32 || instr->num_components == 1); if (nir_src_is_const(instr->src[1])) { int offset = nir_src_as_uint(instr->src[1]); if (try_emit_uniform(c, offset, instr->num_components, - &instr->dest.ssa, + &instr->def, QUNIFORM_INLINE_UBO_0 + index)) { return true; } @@ -2786,14 +2786,14 @@ ntq_emit_load_input(struct v3d_compile *c, nir_intrinsic_instr *instr) index += nir_intrinsic_component(instr); for (int i = 0; i < instr->num_components; i++) { struct qreg vpm_offset = vir_uniform_ui(c, index++); - ntq_store_def(c, &instr->dest.ssa, i, + ntq_store_def(c, &instr->def, i, vir_LDVPMV_IN(c, vpm_offset)); } } else { for (int i = 0; i < instr->num_components; i++) { int comp = nir_intrinsic_component(instr) + i; struct qreg input = c->inputs[offset * 4 + comp]; - ntq_store_def(c, &instr->dest.ssa, i, vir_MOV(c, input)); + ntq_store_def(c, &instr->def, i, vir_MOV(c, input)); if (c->s->info.stage == MESA_SHADER_FRAGMENT && input.file == c->payload_z.file && @@ -3108,7 +3108,7 @@ ntq_emit_load_unifa(struct v3d_compile *c, nir_intrinsic_instr *instr) * use ldunifa if we can verify alignment, which we can only do for * loads with a constant offset. 
*/ - uint32_t bit_size = instr->dest.ssa.bit_size; + uint32_t bit_size = instr->def.bit_size; uint32_t value_skips = 0; if (bit_size < 32) { if (dynamic_src) { @@ -3205,7 +3205,7 @@ ntq_emit_load_unifa(struct v3d_compile *c, nir_intrinsic_instr *instr) if (bit_size == 32) { assert(value_skips == 0); - ntq_store_def(c, &instr->dest.ssa, i, vir_MOV(c, data)); + ntq_store_def(c, &instr->def, i, vir_MOV(c, data)); i++; } else { assert((bit_size == 16 && value_skips <= 1) || @@ -3234,7 +3234,7 @@ ntq_emit_load_unifa(struct v3d_compile *c, nir_intrinsic_instr *instr) uint32_t mask = (1 << bit_size) - 1; tmp = vir_AND(c, vir_MOV(c, data), vir_uniform_ui(c, mask)); - ntq_store_def(c, &instr->dest.ssa, i, + ntq_store_def(c, &instr->def, i, vir_MOV(c, tmp)); i++; valid_count--; @@ -3356,20 +3356,20 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr) break; case nir_intrinsic_get_ssbo_size: - ntq_store_def(c, &instr->dest.ssa, 0, + ntq_store_def(c, &instr->def, 0, vir_uniform(c, QUNIFORM_GET_SSBO_SIZE, nir_src_comp_as_uint(instr->src[0], 0))); break; case nir_intrinsic_get_ubo_size: - ntq_store_def(c, &instr->dest.ssa, 0, + ntq_store_def(c, &instr->def, 0, vir_uniform(c, QUNIFORM_GET_UBO_SIZE, nir_src_comp_as_uint(instr->src[0], 0))); break; case nir_intrinsic_load_user_clip_plane: for (int i = 0; i < nir_intrinsic_dest_components(instr); i++) { - ntq_store_def(c, &instr->dest.ssa, i, + ntq_store_def(c, &instr->def, i, vir_uniform(c, QUNIFORM_USER_CLIP_PLANE, nir_intrinsic_ucp_id(instr) * 4 + i)); @@ -3377,69 +3377,69 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr) break; case nir_intrinsic_load_viewport_x_scale: - ntq_store_def(c, &instr->dest.ssa, 0, + ntq_store_def(c, &instr->def, 0, vir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE, 0)); break; case nir_intrinsic_load_viewport_y_scale: - ntq_store_def(c, &instr->dest.ssa, 0, + ntq_store_def(c, &instr->def, 0, vir_uniform(c, QUNIFORM_VIEWPORT_Y_SCALE, 0)); break; case nir_intrinsic_load_viewport_z_scale: - ntq_store_def(c, &instr->dest.ssa, 0, + ntq_store_def(c, &instr->def, 0, vir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0)); break; case nir_intrinsic_load_viewport_z_offset: - ntq_store_def(c, &instr->dest.ssa, 0, + ntq_store_def(c, &instr->def, 0, vir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0)); break; case nir_intrinsic_load_line_coord: - ntq_store_def(c, &instr->dest.ssa, 0, vir_MOV(c, c->line_x)); + ntq_store_def(c, &instr->def, 0, vir_MOV(c, c->line_x)); break; case nir_intrinsic_load_line_width: - ntq_store_def(c, &instr->dest.ssa, 0, + ntq_store_def(c, &instr->def, 0, vir_uniform(c, QUNIFORM_LINE_WIDTH, 0)); break; case nir_intrinsic_load_aa_line_width: - ntq_store_def(c, &instr->dest.ssa, 0, + ntq_store_def(c, &instr->def, 0, vir_uniform(c, QUNIFORM_AA_LINE_WIDTH, 0)); break; case nir_intrinsic_load_sample_mask_in: - ntq_store_def(c, &instr->dest.ssa, 0, vir_MSF(c)); + ntq_store_def(c, &instr->def, 0, vir_MSF(c)); break; case nir_intrinsic_load_helper_invocation: vir_set_pf(c, vir_MSF_dest(c, vir_nop_reg()), V3D_QPU_PF_PUSHZ); struct qreg qdest = ntq_emit_cond_to_bool(c, V3D_QPU_COND_IFA); - ntq_store_def(c, &instr->dest.ssa, 0, qdest); + ntq_store_def(c, &instr->def, 0, qdest); break; case nir_intrinsic_load_front_face: /* The register contains 0 (front) or 1 (back), and we need to * turn it into a NIR bool where true means front. 
*/ - ntq_store_def(c, &instr->dest.ssa, 0, + ntq_store_def(c, &instr->def, 0, vir_ADD(c, vir_uniform_ui(c, -1), vir_REVF(c))); break; case nir_intrinsic_load_base_instance: - ntq_store_def(c, &instr->dest.ssa, 0, vir_MOV(c, c->biid)); + ntq_store_def(c, &instr->def, 0, vir_MOV(c, c->biid)); break; case nir_intrinsic_load_instance_id: - ntq_store_def(c, &instr->dest.ssa, 0, vir_MOV(c, c->iid)); + ntq_store_def(c, &instr->def, 0, vir_MOV(c, c->iid)); break; case nir_intrinsic_load_vertex_id: - ntq_store_def(c, &instr->dest.ssa, 0, vir_MOV(c, c->vid)); + ntq_store_def(c, &instr->def, 0, vir_MOV(c, c->vid)); break; case nir_intrinsic_load_tlb_color_v3d: @@ -3542,7 +3542,7 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr) case nir_intrinsic_load_num_workgroups: for (int i = 0; i < 3; i++) { - ntq_store_def(c, &instr->dest.ssa, i, + ntq_store_def(c, &instr->def, i, vir_uniform(c, QUNIFORM_NUM_WORK_GROUPS, i)); } @@ -3552,32 +3552,32 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr) case nir_intrinsic_load_workgroup_id: { struct qreg x = vir_AND(c, c->cs_payload[0], vir_uniform_ui(c, 0xffff)); - ntq_store_def(c, &instr->dest.ssa, 0, x); + ntq_store_def(c, &instr->def, 0, x); struct qreg y = vir_SHR(c, c->cs_payload[0], vir_uniform_ui(c, 16)); - ntq_store_def(c, &instr->dest.ssa, 1, y); + ntq_store_def(c, &instr->def, 1, y); struct qreg z = vir_AND(c, c->cs_payload[1], vir_uniform_ui(c, 0xffff)); - ntq_store_def(c, &instr->dest.ssa, 2, z); + ntq_store_def(c, &instr->def, 2, z); break; } case nir_intrinsic_load_base_workgroup_id: { struct qreg x = vir_uniform(c, QUNIFORM_WORK_GROUP_BASE, 0); - ntq_store_def(c, &instr->dest.ssa, 0, x); + ntq_store_def(c, &instr->def, 0, x); struct qreg y = vir_uniform(c, QUNIFORM_WORK_GROUP_BASE, 1); - ntq_store_def(c, &instr->dest.ssa, 1, y); + ntq_store_def(c, &instr->def, 1, y); struct qreg z = vir_uniform(c, QUNIFORM_WORK_GROUP_BASE, 2); - ntq_store_def(c, &instr->dest.ssa, 2, z); + ntq_store_def(c, &instr->def, 2, z); break; } case nir_intrinsic_load_local_invocation_index: - ntq_store_def(c, &instr->dest.ssa, 0, + ntq_store_def(c, &instr->def, 0, emit_load_local_invocation_index(c)); break; @@ -3588,7 +3588,7 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr) STATIC_ASSERT(IS_POT(V3D_CHANNELS) && V3D_CHANNELS > 0); const uint32_t divide_shift = ffs(V3D_CHANNELS) - 1; struct qreg lii = emit_load_local_invocation_index(c); - ntq_store_def(c, &instr->dest.ssa, 0, + ntq_store_def(c, &instr->def, 0, vir_SHR(c, lii, vir_uniform_ui(c, divide_shift))); break; @@ -3627,7 +3627,7 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr) struct qreg col = ntq_get_src(c, instr->src[0], 0); for (int i = 0; i < instr->num_components; i++) { struct qreg row = vir_uniform_ui(c, row_idx++); - ntq_store_def(c, &instr->dest.ssa, i, + ntq_store_def(c, &instr->def, i, vir_LDVPMG_IN(c, row, col)); } break; @@ -3644,47 +3644,47 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr) * using ldvpm(v,d)_in (See Table 71). 
*/ assert(c->s->info.stage == MESA_SHADER_GEOMETRY); - ntq_store_def(c, &instr->dest.ssa, 0, + ntq_store_def(c, &instr->def, 0, vir_LDVPMV_IN(c, vir_uniform_ui(c, 0))); break; } case nir_intrinsic_load_invocation_id: - ntq_store_def(c, &instr->dest.ssa, 0, vir_IID(c)); + ntq_store_def(c, &instr->def, 0, vir_IID(c)); break; case nir_intrinsic_load_fb_layers_v3d: - ntq_store_def(c, &instr->dest.ssa, 0, + ntq_store_def(c, &instr->def, 0, vir_uniform(c, QUNIFORM_FB_LAYERS, 0)); break; case nir_intrinsic_load_sample_id: - ntq_store_def(c, &instr->dest.ssa, 0, vir_SAMPID(c)); + ntq_store_def(c, &instr->def, 0, vir_SAMPID(c)); break; case nir_intrinsic_load_sample_pos: - ntq_store_def(c, &instr->dest.ssa, 0, + ntq_store_def(c, &instr->def, 0, vir_FSUB(c, vir_FXCD(c), vir_ITOF(c, vir_XCD(c)))); - ntq_store_def(c, &instr->dest.ssa, 1, + ntq_store_def(c, &instr->def, 1, vir_FSUB(c, vir_FYCD(c), vir_ITOF(c, vir_YCD(c)))); break; case nir_intrinsic_load_barycentric_at_offset: - ntq_store_def(c, &instr->dest.ssa, 0, + ntq_store_def(c, &instr->def, 0, vir_MOV(c, ntq_get_src(c, instr->src[0], 0))); - ntq_store_def(c, &instr->dest.ssa, 1, + ntq_store_def(c, &instr->def, 1, vir_MOV(c, ntq_get_src(c, instr->src[0], 1))); break; case nir_intrinsic_load_barycentric_pixel: - ntq_store_def(c, &instr->dest.ssa, 0, vir_uniform_f(c, 0.0f)); - ntq_store_def(c, &instr->dest.ssa, 1, vir_uniform_f(c, 0.0f)); + ntq_store_def(c, &instr->def, 0, vir_uniform_f(c, 0.0f)); + ntq_store_def(c, &instr->def, 1, vir_uniform_f(c, 0.0f)); break; case nir_intrinsic_load_barycentric_at_sample: { if (!c->fs_key->msaa) { - ntq_store_def(c, &instr->dest.ssa, 0, vir_uniform_f(c, 0.0f)); - ntq_store_def(c, &instr->dest.ssa, 1, vir_uniform_f(c, 0.0f)); + ntq_store_def(c, &instr->def, 0, vir_uniform_f(c, 0.0f)); + ntq_store_def(c, &instr->def, 1, vir_uniform_f(c, 0.0f)); return; } @@ -3692,8 +3692,8 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr) struct qreg sample_idx = ntq_get_src(c, instr->src[0], 0); ntq_get_sample_offset(c, sample_idx, &offset_x, &offset_y); - ntq_store_def(c, &instr->dest.ssa, 0, vir_MOV(c, offset_x)); - ntq_store_def(c, &instr->dest.ssa, 1, vir_MOV(c, offset_y)); + ntq_store_def(c, &instr->def, 0, vir_MOV(c, offset_x)); + ntq_store_def(c, &instr->def, 1, vir_MOV(c, offset_y)); break; } @@ -3703,9 +3703,9 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr) struct qreg offset_y = vir_FSUB(c, vir_FYCD(c), vir_ITOF(c, vir_YCD(c))); - ntq_store_def(c, &instr->dest.ssa, 0, + ntq_store_def(c, &instr->def, 0, vir_FSUB(c, offset_x, vir_uniform_f(c, 0.5f))); - ntq_store_def(c, &instr->dest.ssa, 1, + ntq_store_def(c, &instr->def, 1, vir_FSUB(c, offset_y, vir_uniform_f(c, 0.5f))); break; } @@ -3713,8 +3713,8 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr) case nir_intrinsic_load_barycentric_centroid: { struct qreg offset_x, offset_y; ntq_get_barycentric_centroid(c, &offset_x, &offset_y); - ntq_store_def(c, &instr->dest.ssa, 0, vir_MOV(c, offset_x)); - ntq_store_def(c, &instr->dest.ssa, 1, vir_MOV(c, offset_y)); + ntq_store_def(c, &instr->def, 0, vir_MOV(c, offset_x)); + ntq_store_def(c, &instr->def, 1, vir_MOV(c, offset_y)); break; } @@ -3733,7 +3733,7 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr) */ if (!c->fs_key->msaa || c->interp[input_idx].vp.file == QFILE_NULL) { - ntq_store_def(c, &instr->dest.ssa, i, + ntq_store_def(c, &instr->def, i, vir_MOV(c, c->inputs[input_idx])); continue; } @@ -3752,18 +3752,18 @@ 
ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr) ntq_emit_load_interpolated_input(c, p, C, offset_x, offset_y, interp_mode); - ntq_store_def(c, &instr->dest.ssa, i, result); + ntq_store_def(c, &instr->def, i, result); } break; } case nir_intrinsic_load_subgroup_size: - ntq_store_def(c, &instr->dest.ssa, 0, + ntq_store_def(c, &instr->def, 0, vir_uniform_ui(c, V3D_CHANNELS)); break; case nir_intrinsic_load_subgroup_invocation: - ntq_store_def(c, &instr->dest.ssa, 0, vir_EIDX(c)); + ntq_store_def(c, &instr->def, 0, vir_EIDX(c)); break; case nir_intrinsic_elect: { @@ -3775,7 +3775,7 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr) first, vir_uniform_ui(c, 1)), V3D_QPU_PF_PUSHZ); struct qreg result = ntq_emit_cond_to_bool(c, V3D_QPU_COND_IFA); - ntq_store_def(c, &instr->dest.ssa, 0, result); + ntq_store_def(c, &instr->def, 0, result); break; } @@ -3784,7 +3784,7 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr) break; case nir_intrinsic_load_view_index: - ntq_store_def(c, &instr->dest.ssa, 0, + ntq_store_def(c, &instr->def, 0, vir_uniform(c, QUNIFORM_VIEW_INDEX, 0)); break; diff --git a/src/broadcom/compiler/v3d33_tex.c b/src/broadcom/compiler/v3d33_tex.c index 36a81c2..b4c888a 100644 --- a/src/broadcom/compiler/v3d33_tex.c +++ b/src/broadcom/compiler/v3d33_tex.c @@ -135,7 +135,7 @@ v3d33_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr) * instruction writes and how many the instruction could produce. */ p1_unpacked.return_words_of_texture_data = - nir_def_components_read(&instr->dest.ssa); + nir_def_components_read(&instr->def); uint32_t p0_packed; V3D33_TEXTURE_UNIFORM_PARAMETER_0_CFG_MODE1_pack(NULL, @@ -188,6 +188,6 @@ v3d33_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr) for (int i = 0; i < 4; i++) { if (p1_unpacked.return_words_of_texture_data & (1 << i)) - ntq_store_def(c, &instr->dest.ssa, i, vir_LDTMU(c)); + ntq_store_def(c, &instr->def, i, vir_LDTMU(c)); } } diff --git a/src/broadcom/compiler/v3d40_tex.c b/src/broadcom/compiler/v3d40_tex.c index 8a59491..9ae9938 100644 --- a/src/broadcom/compiler/v3d40_tex.c +++ b/src/broadcom/compiler/v3d40_tex.c @@ -250,10 +250,10 @@ v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr) /* Limit the number of channels returned to both how many the NIR * instruction writes and how many the instruction could produce. 
*/ - nir_intrinsic_instr *store = nir_store_reg_for_def(&instr->dest.ssa); + nir_intrinsic_instr *store = nir_store_reg_for_def(&instr->def); if (store == NULL) { p0_unpacked.return_words_of_texture_data = - nir_def_components_read(&instr->dest.ssa); + nir_def_components_read(&instr->def); } else { nir_def *reg = store->src[1].ssa; nir_intrinsic_instr *decl = nir_reg_get_decl(reg); @@ -407,7 +407,7 @@ v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr) } retiring->ldtmu_count = p0_unpacked.return_words_of_texture_data; - ntq_add_pending_tmu_flush(c, &instr->dest.ssa, + ntq_add_pending_tmu_flush(c, &instr->def, p0_unpacked.return_words_of_texture_data); } @@ -639,6 +639,6 @@ v3d40_vir_emit_image_load_store(struct v3d_compile *c, struct qinst *retiring = vir_image_emit_register_writes(c, instr, atomic_add_replaced, NULL); retiring->ldtmu_count = p0_unpacked.return_words_of_texture_data; - ntq_add_pending_tmu_flush(c, &instr->dest.ssa, + ntq_add_pending_tmu_flush(c, &instr->def, p0_unpacked.return_words_of_texture_data); } diff --git a/src/broadcom/compiler/v3d_nir_lower_image_load_store.c b/src/broadcom/compiler/v3d_nir_lower_image_load_store.c index c0061d5..9e739e1 100644 --- a/src/broadcom/compiler/v3d_nir_lower_image_load_store.c +++ b/src/broadcom/compiler/v3d_nir_lower_image_load_store.c @@ -182,7 +182,7 @@ v3d_nir_lower_image_load(nir_builder *b, nir_intrinsic_instr *instr) b->cursor = nir_after_instr(&instr->instr); - nir_def *result = &instr->dest.ssa; + nir_def *result = &instr->def; if (util_format_is_pure_uint(format)) { result = nir_format_unpack_uint(b, result, bits16, 4); } else if (util_format_is_pure_sint(format)) { @@ -197,7 +197,7 @@ v3d_nir_lower_image_load(nir_builder *b, nir_intrinsic_instr *instr) nir_unpack_half_2x16_split_y(b, ba)); } - nir_def_rewrite_uses_after(&instr->dest.ssa, result, + nir_def_rewrite_uses_after(&instr->def, result, result->parent_instr); return true; diff --git a/src/broadcom/compiler/v3d_nir_lower_load_store_bitsize.c b/src/broadcom/compiler/v3d_nir_lower_load_store_bitsize.c index 6602f08..d546bfe 100644 --- a/src/broadcom/compiler/v3d_nir_lower_load_store_bitsize.c +++ b/src/broadcom/compiler/v3d_nir_lower_load_store_bitsize.c @@ -116,7 +116,7 @@ static bool lower_load_bitsize(nir_builder *b, nir_intrinsic_instr *intr) { - uint32_t bit_size = intr->dest.ssa.bit_size; + uint32_t bit_size = intr->def.bit_size; if (bit_size == 32) return false; @@ -153,15 +153,15 @@ lower_load_bitsize(nir_builder *b, } } - nir_def_init(&new_intr->instr, &new_intr->dest.ssa, 1, + nir_def_init(&new_intr->instr, &new_intr->def, 1, bit_size); - dest_components[component] = &new_intr->dest.ssa; + dest_components[component] = &new_intr->def; nir_builder_instr_insert(b, &new_intr->instr); } nir_def *new_dst = nir_vec(b, dest_components, num_comp); - nir_def_rewrite_uses(&intr->dest.ssa, new_dst); + nir_def_rewrite_uses(&intr->def, new_dst); nir_instr_remove(&intr->instr); return true; diff --git a/src/broadcom/compiler/v3d_nir_lower_scratch.c b/src/broadcom/compiler/v3d_nir_lower_scratch.c index a578237..a168953 100644 --- a/src/broadcom/compiler/v3d_nir_lower_scratch.c +++ b/src/broadcom/compiler/v3d_nir_lower_scratch.c @@ -65,8 +65,8 @@ v3d_nir_lower_load_scratch(nir_builder *b, nir_intrinsic_instr *instr) nir_intrinsic_instr *chan_instr = nir_intrinsic_instr_create(b->shader, instr->intrinsic); chan_instr->num_components = 1; - nir_def_init(&chan_instr->instr, &chan_instr->dest.ssa, 1, - instr->dest.ssa.bit_size); + nir_def_init(&chan_instr->instr, 
&chan_instr->def, 1, + instr->def.bit_size); chan_instr->src[0] = nir_src_for_ssa(chan_offset); @@ -74,11 +74,11 @@ v3d_nir_lower_load_scratch(nir_builder *b, nir_intrinsic_instr *instr) nir_builder_instr_insert(b, &chan_instr->instr); - chans[i] = &chan_instr->dest.ssa; + chans[i] = &chan_instr->def; } nir_def *result = nir_vec(b, chans, instr->num_components); - nir_def_rewrite_uses(&instr->dest.ssa, result); + nir_def_rewrite_uses(&instr->def, result); nir_instr_remove(&instr->instr); } diff --git a/src/broadcom/compiler/vir.c b/src/broadcom/compiler/vir.c index 1599aad..c997b67 100644 --- a/src/broadcom/compiler/vir.c +++ b/src/broadcom/compiler/vir.c @@ -1482,7 +1482,7 @@ lower_load_num_subgroups(struct v3d_compile *c, c->s->info.workgroup_size[1] * c->s->info.workgroup_size[2], V3D_CHANNELS); nir_def *result = nir_imm_int(b, num_subgroups); - nir_def_rewrite_uses(&intr->dest.ssa, result); + nir_def_rewrite_uses(&intr->def, result); nir_instr_remove(&intr->instr); } diff --git a/src/broadcom/vulkan/v3dv_meta_copy.c b/src/broadcom/vulkan/v3dv_meta_copy.c index 55dd3fe..758f7e5 100644 --- a/src/broadcom/vulkan/v3dv_meta_copy.c +++ b/src/broadcom/vulkan/v3dv_meta_copy.c @@ -2323,7 +2323,7 @@ get_texel_buffer_copy_fs(struct v3dv_device *device, VkFormat format, nir_iadd(&b, nir_iadd(&b, offset, x_offset), nir_imul(&b, y_offset, stride)); - nir_def *tex_deref = &nir_build_deref_var(&b, sampler)->dest.ssa; + nir_def *tex_deref = &nir_build_deref_var(&b, sampler)->def; nir_tex_instr *tex = nir_tex_instr_create(b.shader, 2); tex->sampler_dim = GLSL_SAMPLER_DIM_BUF; tex->op = nir_texop_txf; @@ -2332,7 +2332,7 @@ get_texel_buffer_copy_fs(struct v3dv_device *device, VkFormat format, tex->dest_type = nir_type_uint32; tex->is_array = false; tex->coord_components = 1; - nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32); + nir_def_init(&tex->instr, &tex->def, 4, 32); nir_builder_instr_insert(&b, &tex->instr); uint32_t swiz[4]; @@ -2344,7 +2344,7 @@ get_texel_buffer_copy_fs(struct v3dv_device *device, VkFormat format, component_swizzle_to_nir_swizzle(VK_COMPONENT_SWIZZLE_B, cswizzle->b); swiz[3] = component_swizzle_to_nir_swizzle(VK_COMPONENT_SWIZZLE_A, cswizzle->a); - nir_def *s = nir_swizzle(&b, &tex->dest.ssa, swiz, 4); + nir_def *s = nir_swizzle(&b, &tex->def, swiz, 4); nir_store_var(&b, fs_out_color, s, 0xf); return b.shader; @@ -3597,7 +3597,7 @@ build_nir_tex_op_read(struct nir_builder *b, sampler->data.descriptor_set = 0; sampler->data.binding = 0; - nir_def *tex_deref = &nir_build_deref_var(b, sampler)->dest.ssa; + nir_def *tex_deref = &nir_build_deref_var(b, sampler)->def; nir_tex_instr *tex = nir_tex_instr_create(b->shader, 3); tex->sampler_dim = dim; tex->op = nir_texop_tex; @@ -3608,9 +3608,9 @@ build_nir_tex_op_read(struct nir_builder *b, tex->is_array = glsl_sampler_type_is_array(sampler_type); tex->coord_components = tex_pos->num_components; - nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32); + nir_def_init(&tex->instr, &tex->def, 4, 32); nir_builder_instr_insert(b, &tex->instr); - return &tex->dest.ssa; + return &tex->def; } static nir_def * @@ -3631,9 +3631,9 @@ build_nir_tex_op_ms_fetch_sample(struct nir_builder *b, tex->is_array = false; tex->coord_components = tex_pos->num_components; - nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32); + nir_def_init(&tex->instr, &tex->def, 4, 32); nir_builder_instr_insert(b, &tex->instr); - return &tex->dest.ssa; + return &tex->def; } /* Fetches all samples at the given position and averages them */ @@ -3654,7 +3654,7 @@ 
build_nir_tex_op_ms_resolve(struct nir_builder *b, const bool is_int = glsl_base_type_is_integer(tex_type); nir_def *tmp = NULL; - nir_def *tex_deref = &nir_build_deref_var(b, sampler)->dest.ssa; + nir_def *tex_deref = &nir_build_deref_var(b, sampler)->def; for (uint32_t i = 0; i < src_samples; i++) { nir_def *s = build_nir_tex_op_ms_fetch_sample(b, sampler, tex_deref, @@ -3687,7 +3687,7 @@ build_nir_tex_op_ms_read(struct nir_builder *b, sampler->data.descriptor_set = 0; sampler->data.binding = 0; - nir_def *tex_deref = &nir_build_deref_var(b, sampler)->dest.ssa; + nir_def *tex_deref = &nir_build_deref_var(b, sampler)->def; return build_nir_tex_op_ms_fetch_sample(b, sampler, tex_deref, tex_type, tex_pos, diff --git a/src/broadcom/vulkan/v3dv_pipeline.c b/src/broadcom/vulkan/v3dv_pipeline.c index 750f162..da236c5 100644 --- a/src/broadcom/vulkan/v3dv_pipeline.c +++ b/src/broadcom/vulkan/v3dv_pipeline.c @@ -568,7 +568,7 @@ lower_vulkan_resource_index(nir_builder *b, * vulkan_load_descriptor return a vec2 providing an index and * offset. Our backend compiler only cares about the index part. */ - nir_def_rewrite_uses(&instr->dest.ssa, + nir_def_rewrite_uses(&instr->def, nir_imm_ivec2(b, index, 0)); nir_instr_remove(&instr->instr); } @@ -826,7 +826,7 @@ lower_intrinsic(nir_builder *b, /* Loading the descriptor happens as part of load/store instructions, * so for us this is a no-op. */ - nir_def_rewrite_uses(&instr->dest.ssa, instr->src[0].ssa); + nir_def_rewrite_uses(&instr->def, instr->src[0].ssa); nir_instr_remove(&instr->instr); return true; } @@ -907,11 +907,11 @@ lower_point_coord_cb(nir_builder *b, nir_instr *instr, void *_state) return false; b->cursor = nir_after_instr(&intr->instr); - nir_def *result = &intr->dest.ssa; + nir_def *result = &intr->def; result = nir_vector_insert_imm(b, result, nir_fsub_imm(b, 1.0, nir_channel(b, result, 1)), 1); - nir_def_rewrite_uses_after(&intr->dest.ssa, + nir_def_rewrite_uses_after(&intr->def, result, result->parent_instr); return true; } diff --git a/src/compiler/glsl/gl_nir_link_varyings.c b/src/compiler/glsl/gl_nir_link_varyings.c index f44003b..354c741 100644 --- a/src/compiler/glsl/gl_nir_link_varyings.c +++ b/src/compiler/glsl/gl_nir_link_varyings.c @@ -2611,9 +2611,9 @@ replace_unused_interpolate_at_with_undef(nir_builder *b, nir_instr *instr, if (var->data.mode == nir_var_shader_temp) { /* Create undef and rewrite the interp uses */ nir_def *undef = - nir_undef(b, intrin->dest.ssa.num_components, - intrin->dest.ssa.bit_size); - nir_def_rewrite_uses(&intrin->dest.ssa, undef); + nir_undef(b, intrin->def.num_components, + intrin->def.bit_size); + nir_def_rewrite_uses(&intrin->def, undef); nir_instr_remove(&intrin->instr); return true; diff --git a/src/compiler/glsl/gl_nir_lower_buffers.c b/src/compiler/glsl/gl_nir_lower_buffers.c index 0f54d75..7b469a9 100644 --- a/src/compiler/glsl/gl_nir_lower_buffers.c +++ b/src/compiler/glsl/gl_nir_lower_buffers.c @@ -194,8 +194,8 @@ lower_buffer_interface_derefs_impl(nir_function_impl *impl, break; /* We use nir_address_format_32bit_index_offset */ - assert(deref->dest.ssa.bit_size == 32); - deref->dest.ssa.num_components = 2; + assert(deref->def.bit_size == 32); + deref->def.num_components = 2; progress = true; @@ -237,8 +237,8 @@ lower_buffer_interface_derefs_impl(nir_function_impl *impl, cast->cast.align_mul = NIR_ALIGN_MUL_MAX; cast->cast.align_offset = offset % NIR_ALIGN_MUL_MAX; - nir_def_rewrite_uses(&deref->dest.ssa, - &cast->dest.ssa); + nir_def_rewrite_uses(&deref->def, + &cast->def); 
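/*
 * A minimal sketch (not taken from the patch) of the rewrite-and-remove
 * idiom this rename touches throughout the tree; `b`, `intr`, and the
 * nir_imm_int replacement value are assumed placeholders:
 *
 *    nir_def *repl = nir_imm_int(b, 0);       // build the replacement value
 *    nir_def_rewrite_uses(&intr->def, repl);  // previously &intr->dest.ssa
 *    nir_instr_remove(&intr->instr);          // old instruction is now dead
 */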
nir_deref_instr_remove_if_unused(deref); break; } @@ -261,9 +261,9 @@ lower_buffer_interface_derefs_impl(nir_function_impl *impl, */ if (glsl_type_is_boolean(deref->type)) { b.cursor = nir_after_instr(&intrin->instr); - intrin->dest.ssa.bit_size = 32; - nir_def *bval = nir_i2b(&b, &intrin->dest.ssa); - nir_def_rewrite_uses_after(&intrin->dest.ssa, + intrin->def.bit_size = 32; + nir_def *bval = nir_i2b(&b, &intrin->def); + nir_def_rewrite_uses_after(&intrin->def, bval, bval->parent_instr); progress = true; diff --git a/src/compiler/glsl/gl_nir_lower_packed_varyings.c b/src/compiler/glsl/gl_nir_lower_packed_varyings.c index a761940..66ccbce 100644 --- a/src/compiler/glsl/gl_nir_lower_packed_varyings.c +++ b/src/compiler/glsl/gl_nir_lower_packed_varyings.c @@ -822,7 +822,7 @@ lower_varying(struct lower_packed_varyings_state *state, swizzle_values[i] = i + location_frac; } - nir_def *ssa_def = &packed_deref->dest.ssa; + nir_def *ssa_def = &packed_deref->def; ssa_def = nir_load_deref(&state->b, packed_deref); nir_def *swizzle = nir_swizzle(&state->b, ssa_def, swizzle_values, components); diff --git a/src/compiler/glsl/gl_nir_lower_samplers_as_deref.c b/src/compiler/glsl/gl_nir_lower_samplers_as_deref.c index 03dc81b..8ce4fd0 100644 --- a/src/compiler/glsl/gl_nir_lower_samplers_as_deref.c +++ b/src/compiler/glsl/gl_nir_lower_samplers_as_deref.c @@ -286,7 +286,7 @@ lower_sampler(nir_tex_instr *instr, struct lower_samplers_as_deref_state *state, /* only lower non-bindless: */ if (texture_deref) { nir_instr_rewrite_src(&instr->instr, &instr->src[texture_idx].src, - nir_src_for_ssa(&texture_deref->dest.ssa)); + nir_src_for_ssa(&texture_deref->def)); record_textures_used(&b->shader->info, texture_deref, instr->op); } } @@ -297,7 +297,7 @@ lower_sampler(nir_tex_instr *instr, struct lower_samplers_as_deref_state *state, /* only lower non-bindless: */ if (sampler_deref) { nir_instr_rewrite_src(&instr->instr, &instr->src[sampler_idx].src, - nir_src_for_ssa(&sampler_deref->dest.ssa)); + nir_src_for_ssa(&sampler_deref->def)); record_samplers_used(&b->shader->info, sampler_deref, instr->op); } } @@ -329,7 +329,7 @@ lower_intrinsic(nir_intrinsic_instr *instr, if (!deref) return false; nir_instr_rewrite_src(&instr->instr, &instr->src[0], - nir_src_for_ssa(&deref->dest.ssa)); + nir_src_for_ssa(&deref->def)); return true; } if (instr->intrinsic == nir_intrinsic_image_deref_order || diff --git a/src/compiler/glsl/gl_nir_opt_dead_builtin_varyings.c b/src/compiler/glsl/gl_nir_opt_dead_builtin_varyings.c index 97d39ac..3aed5c2 100644 --- a/src/compiler/glsl/gl_nir_opt_dead_builtin_varyings.c +++ b/src/compiler/glsl/gl_nir_opt_dead_builtin_varyings.c @@ -298,7 +298,7 @@ rewrite_varying_deref(nir_builder *b, struct replace_varyings_data *rv_data, unsigned i = nir_src_as_uint(deref->arr.index); nir_deref_instr *new_deref = nir_build_deref_var(b, rv_data->new_texcoord[i]); - nir_def_rewrite_uses(&deref->dest.ssa, &new_deref->dest.ssa); + nir_def_rewrite_uses(&deref->def, &new_deref->def); return; } } diff --git a/src/compiler/glsl/glsl_to_nir.cpp b/src/compiler/glsl/glsl_to_nir.cpp index 4bcf070..377a61e 100644 --- a/src/compiler/glsl/glsl_to_nir.cpp +++ b/src/compiler/glsl/glsl_to_nir.cpp @@ -1180,7 +1180,7 @@ nir_visitor::visit(ir_call *ir) } nir_intrinsic_instr *instr = nir_intrinsic_instr_create(shader, op); - nir_def *ret = &instr->dest.ssa; + nir_def *ret = &instr->def; switch (op) { case nir_intrinsic_deref_atomic: @@ -1205,7 +1205,7 @@ nir_visitor::visit(ir_call *ir) nir_deref = nir_build_deref_array_imm(&b, 
nir_deref, swizzle->mask.x); } - instr->src[0] = nir_src_for_ssa(&nir_deref->dest.ssa); + instr->src[0] = nir_src_for_ssa(&nir_deref->def); nir_intrinsic_set_atomic_op(instr, atomic_op); nir_intrinsic_set_access(instr, deref_get_qualifier(nir_deref)); @@ -1226,10 +1226,10 @@ nir_visitor::visit(ir_call *ir) /* Atomic result */ assert(ir->return_deref); if (ir->return_deref->type->is_integer_64()) { - nir_def_init(&instr->instr, &instr->dest.ssa, + nir_def_init(&instr->instr, &instr->def, ir->return_deref->type->vector_elements, 64); } else { - nir_def_init(&instr->instr, &instr->dest.ssa, + nir_def_init(&instr->instr, &instr->def, ir->return_deref->type->vector_elements, 32); } nir_builder_instr_insert(&b, &instr->instr); @@ -1250,12 +1250,12 @@ nir_visitor::visit(ir_call *ir) exec_node *param = ir->actual_parameters.get_head(); ir_dereference *counter = (ir_dereference *)param; - instr->src[0] = nir_src_for_ssa(&evaluate_deref(counter)->dest.ssa); + instr->src[0] = nir_src_for_ssa(&evaluate_deref(counter)->def); param = param->get_next(); /* Set the intrinsic destination. */ if (ir->return_deref) { - nir_def_init(&instr->instr, &instr->dest.ssa, 1, 32); + nir_def_init(&instr->instr, &instr->def, 1, 32); } /* Set the intrinsic parameters. */ @@ -1294,7 +1294,7 @@ nir_visitor::visit(ir_call *ir) nir_intrinsic_set_atomic_op(instr, atomic_op); } - instr->src[0] = nir_src_for_ssa(&deref->dest.ssa); + instr->src[0] = nir_src_for_ssa(&deref->def); param = param->get_next(); nir_intrinsic_set_image_dim(instr, (glsl_sampler_dim)type->sampler_dimensionality); @@ -1311,14 +1311,14 @@ nir_visitor::visit(ir_call *ir) } else num_components = ir->return_deref->type->vector_elements; - nir_def_init(&instr->instr, &instr->dest.ssa, num_components, 32); + nir_def_init(&instr->instr, &instr->def, num_components, 32); } if (op == nir_intrinsic_image_deref_size) { - instr->num_components = instr->dest.ssa.num_components; + instr->num_components = instr->def.num_components; } else if (op == nir_intrinsic_image_deref_load || op == nir_intrinsic_image_deref_sparse_load) { - instr->num_components = instr->dest.ssa.num_components; + instr->num_components = instr->def.num_components; nir_intrinsic_set_dest_type(instr, nir_get_nir_type_for_glsl_base_type(type->sampled_type)); } else if (op == nir_intrinsic_image_deref_store) { @@ -1444,7 +1444,7 @@ nir_visitor::visit(ir_call *ir) break; } case nir_intrinsic_shader_clock: - nir_def_init(&instr->instr, &instr->dest.ssa, 2, 32); + nir_def_init(&instr->instr, &instr->def, 2, 32); nir_intrinsic_set_memory_scope(instr, SCOPE_SUBGROUP); nir_builder_instr_insert(&b, &instr->instr); break; @@ -1495,14 +1495,14 @@ nir_visitor::visit(ir_call *ir) /* Setup destination register */ unsigned bit_size = type->is_boolean() ? 
32 : glsl_get_bit_size(type); - nir_def_init(&instr->instr, &instr->dest.ssa, type->vector_elements, + nir_def_init(&instr->instr, &instr->def, type->vector_elements, bit_size); nir_builder_instr_insert(&b, &instr->instr); /* The value in shared memory is a 32-bit value */ if (type->is_boolean()) - ret = nir_b2b1(&b, &instr->dest.ssa); + ret = nir_b2b1(&b, &instr->def); break; } case nir_intrinsic_store_shared: { @@ -1538,7 +1538,7 @@ nir_visitor::visit(ir_call *ir) FALLTHROUGH; case nir_intrinsic_vote_any: case nir_intrinsic_vote_all: { - nir_def_init(&instr->instr, &instr->dest.ssa, 1, 1); + nir_def_init(&instr->instr, &instr->def, 1, 1); ir_rvalue *value = (ir_rvalue *) ir->actual_parameters.get_head(); instr->src[0] = nir_src_for_ssa(evaluate_rvalue(value)); @@ -1548,7 +1548,7 @@ nir_visitor::visit(ir_call *ir) } case nir_intrinsic_ballot: { - nir_def_init(&instr->instr, &instr->dest.ssa, + nir_def_init(&instr->instr, &instr->def, ir->return_deref->type->vector_elements, 64); instr->num_components = ir->return_deref->type->vector_elements; @@ -1559,7 +1559,7 @@ nir_visitor::visit(ir_call *ir) break; } case nir_intrinsic_read_invocation: { - nir_def_init(&instr->instr, &instr->dest.ssa, + nir_def_init(&instr->instr, &instr->def, ir->return_deref->type->vector_elements, 32); instr->num_components = ir->return_deref->type->vector_elements; @@ -1573,7 +1573,7 @@ nir_visitor::visit(ir_call *ir) break; } case nir_intrinsic_read_first_invocation: { - nir_def_init(&instr->instr, &instr->dest.ssa, + nir_def_init(&instr->instr, &instr->def, ir->return_deref->type->vector_elements, 32); instr->num_components = ir->return_deref->type->vector_elements; @@ -1584,12 +1584,12 @@ nir_visitor::visit(ir_call *ir) break; } case nir_intrinsic_is_helper_invocation: { - nir_def_init(&instr->instr, &instr->dest.ssa, 1, 1); + nir_def_init(&instr->instr, &instr->def, 1, 1); nir_builder_instr_insert(&b, &instr->instr); break; } case nir_intrinsic_is_sparse_texels_resident: { - nir_def_init(&instr->instr, &instr->dest.ssa, 1, 1); + nir_def_init(&instr->instr, &instr->def, 1, 1); ir_rvalue *value = (ir_rvalue *) ir->actual_parameters.get_head(); instr->src[0] = nir_src_for_ssa(evaluate_rvalue(value)); @@ -1627,7 +1627,7 @@ nir_visitor::visit(ir_call *ir) nir_local_variable_create(this->impl, ir->return_deref->type, "return_tmp"); ret_deref = nir_build_deref_var(&b, ret_tmp); - call->params[i++] = nir_src_for_ssa(&ret_deref->dest.ssa); + call->params[i++] = nir_src_for_ssa(&ret_deref->def); } foreach_two_lists(formal_node, &ir->callee->parameters, @@ -1637,7 +1637,7 @@ nir_visitor::visit(ir_call *ir) if (sig_param->data.mode == ir_var_function_out) { nir_deref_instr *out_deref = evaluate_deref(param_rvalue); - call->params[i] = nir_src_for_ssa(&out_deref->dest.ssa); + call->params[i] = nir_src_for_ssa(&out_deref->def); } else if (sig_param->data.mode == ir_var_function_in) { nir_def *val = evaluate_rvalue(param_rvalue); nir_src src = nir_src_for_ssa(val); @@ -1736,13 +1736,13 @@ get_instr_def(nir_instr *instr) case nir_instr_type_intrinsic: intrinsic_instr = nir_instr_as_intrinsic(instr); if (nir_intrinsic_infos[intrinsic_instr->intrinsic].has_dest) - return &intrinsic_instr->dest.ssa; + return &intrinsic_instr->def; else return NULL; case nir_instr_type_tex: tex_instr = nir_instr_as_tex(instr); - return &tex_instr->dest.ssa; + return &tex_instr->def; default: unreachable("not reached"); @@ -1837,7 +1837,7 @@ nir_visitor::visit(ir_expression *ir) nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(shader, op); 
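/*
 * A minimal sketch of how a destination is set up once the nir_dest wrapper
 * is gone (assumptions: `shader`, `b`, and `op` as in the surrounding code;
 * the 1-component, 32-bit sizing is only an example):
 *
 *    nir_intrinsic_instr *instr = nir_intrinsic_instr_create(shader, op);
 *    nir_def_init(&instr->instr, &instr->def, 1, 32);  // was &instr->dest.ssa
 *    nir_builder_instr_insert(&b, &instr->instr);
 *    nir_def *ret = &instr->def;                       // was &instr->dest.ssa
 */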
intrin->num_components = deref->type->vector_elements; - intrin->src[0] = nir_src_for_ssa(&this->deref->dest.ssa); + intrin->src[0] = nir_src_for_ssa(&this->deref->def); if (intrin->intrinsic == nir_intrinsic_interp_deref_at_offset || intrin->intrinsic == nir_intrinsic_interp_deref_at_sample) @@ -1864,7 +1864,7 @@ nir_visitor::visit(ir_expression *ir) nir_intrinsic_deref_buffer_array_length); ir_dereference *deref = ir->operands[0]->as_dereference(); - intrin->src[0] = nir_src_for_ssa(&evaluate_deref(deref)->dest.ssa); + intrin->src[0] = nir_src_for_ssa(&evaluate_deref(deref)->def); add_instr(&intrin->instr, 1, 32); return; @@ -2461,9 +2461,9 @@ nir_visitor::visit(ir_texture *ir) instr->src[1] = nir_tex_src_for_ssa(nir_tex_src_sampler_handle, load); } else { instr->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_deref, - &sampler_deref->dest.ssa); + &sampler_deref->def); instr->src[1] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref, - &sampler_deref->dest.ssa); + &sampler_deref->def); } unsigned src_number = 2; diff --git a/src/compiler/nir/nir.c b/src/compiler/nir/nir.c index d362e9a..4ae14bb 100644 --- a/src/compiler/nir/nir.c +++ b/src/compiler/nir/nir.c @@ -1293,22 +1293,22 @@ nir_instr_ssa_def(nir_instr *instr) return &nir_instr_as_alu(instr)->def; case nir_instr_type_deref: - return &nir_instr_as_deref(instr)->dest.ssa; + return &nir_instr_as_deref(instr)->def; case nir_instr_type_tex: - return &nir_instr_as_tex(instr)->dest.ssa; + return &nir_instr_as_tex(instr)->def; case nir_instr_type_intrinsic: { nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr); if (nir_intrinsic_infos[intrin->intrinsic].has_dest) { - return &intrin->dest.ssa; + return &intrin->def; } else { return NULL; } } case nir_instr_type_phi: - return &nir_instr_as_phi(instr)->dest.ssa; + return &nir_instr_as_phi(instr)->def; case nir_instr_type_parallel_copy: unreachable("Parallel copies are unsupported by this function"); diff --git a/src/compiler/nir/nir.h b/src/compiler/nir/nir.h index 7b82f18..7402d40 100644 --- a/src/compiler/nir/nir.h +++ b/src/compiler/nir/nir.h @@ -1038,10 +1038,6 @@ nir_def_used_by_if(const nir_def *def) return false; } -typedef struct { - nir_def ssa; -} nir_dest; - static inline nir_src nir_src_for_ssa(nir_def *def) { @@ -1538,7 +1534,7 @@ typedef struct { }; /** Destination to store the resulting "pointer" */ - nir_dest dest; + nir_def def; } nir_deref_instr; /** Returns true if deref might have one of the given modes @@ -1723,7 +1719,7 @@ typedef struct { nir_intrinsic_op intrinsic; - nir_dest dest; + nir_def def; /** number of components if this is a vectorized intrinsic * @@ -2199,7 +2195,7 @@ typedef struct { nir_texop op; /** Destination */ - nir_dest dest; + nir_def def; /** Array of sources * @@ -2468,7 +2464,7 @@ typedef struct { struct exec_list srcs; /** < list of nir_phi_src */ - nir_dest dest; + nir_def def; } nir_phi_instr; static inline nir_phi_src * @@ -2489,7 +2485,7 @@ typedef struct { bool dest_is_reg; nir_src src; union { - nir_dest dest; + nir_def def; nir_src reg; } dest; } nir_parallel_copy_entry; @@ -6183,13 +6179,13 @@ nir_is_store_reg(nir_intrinsic_instr *intr) #define nir_foreach_reg_load(load, reg) \ assert(reg->intrinsic == nir_intrinsic_decl_reg); \ \ - nir_foreach_use(load, &reg->dest.ssa) \ + nir_foreach_use(load, &reg->def) \ if (nir_is_load_reg(nir_instr_as_intrinsic(load->parent_instr))) #define nir_foreach_reg_store(store, reg) \ assert(reg->intrinsic == nir_intrinsic_decl_reg); \ \ - nir_foreach_use(store, &reg->dest.ssa) \ + nir_foreach_use(store, 
&reg->def) \ if (nir_is_store_reg(nir_instr_as_intrinsic(store->parent_instr))) static inline nir_intrinsic_instr * diff --git a/src/compiler/nir/nir_builder.c b/src/compiler/nir/nir_builder.c index 2924113..713864b 100644 --- a/src/compiler/nir/nir_builder.c +++ b/src/compiler/nir/nir_builder.c @@ -256,11 +256,11 @@ nir_build_tex_deref_instr(nir_builder *build, nir_texop op, unsigned src_idx = 0; tex->src[src_idx++] = nir_tex_src_for_ssa(nir_tex_src_texture_deref, - &texture->dest.ssa); + &texture->def); if (sampler != NULL) { assert(glsl_type_is_sampler(sampler->type)); tex->src[src_idx++] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref, - &sampler->dest.ssa); + &sampler->def); } for (unsigned i = 0; i < num_extra_srcs; i++) { switch (extra_srcs[i].src_type) { @@ -304,11 +304,11 @@ } assert(src_idx == num_srcs); - nir_def_init(&tex->instr, &tex->dest.ssa, nir_tex_instr_dest_size(tex), + nir_def_init(&tex->instr, &tex->def, nir_tex_instr_dest_size(tex), nir_alu_type_get_type_size(tex->dest_type)); nir_builder_instr_insert(build, &tex->instr); - return &tex->dest.ssa; + return &tex->def; } nir_def * @@ -385,9 +385,9 @@ nir_load_system_value(nir_builder *build, nir_intrinsic_op op, int index, load->num_components = num_components; load->const_index[0] = index; - nir_def_init(&load->instr, &load->dest.ssa, num_components, bit_size); + nir_def_init(&load->instr, &load->def, num_components, bit_size); nir_builder_instr_insert(build, &load->instr); - return &load->dest.ssa; + return &load->def; } void @@ -472,12 +472,12 @@ nir_if_phi(nir_builder *build, nir_def *then_def, nir_def *else_def) assert(then_def->num_components == else_def->num_components); assert(then_def->bit_size == else_def->bit_size); - nir_def_init(&phi->instr, &phi->dest.ssa, then_def->num_components, + nir_def_init(&phi->instr, &phi->def, then_def->num_components, then_def->bit_size); nir_builder_instr_insert(build, &phi->instr); - return &phi->dest.ssa; + return &phi->def; } nir_loop * diff --git a/src/compiler/nir/nir_builder.h b/src/compiler/nir/nir_builder.h index 5767f90..c444798 100644 --- a/src/compiler/nir/nir_builder.h +++ b/src/compiler/nir/nir_builder.h @@ -1298,7 +1298,7 @@ nir_build_deref_var(nir_builder *build, nir_variable *var) deref->type = var->type; deref->var = var; - nir_def_init(&deref->instr, &deref->dest.ssa, 1, + nir_def_init(&deref->instr, &deref->def, 1, nir_get_ptr_bitsize(build->shader)); nir_builder_instr_insert(build, &deref->instr); @@ -1314,18 +1314,18 @@ nir_build_deref_array(nir_builder *build, nir_deref_instr *parent, glsl_type_is_matrix(parent->type) || glsl_type_is_vector(parent->type)); - assert(index->bit_size == parent->dest.ssa.bit_size); + assert(index->bit_size == parent->def.bit_size); nir_deref_instr *deref = nir_deref_instr_create(build->shader, nir_deref_type_array); deref->modes = parent->modes; deref->type = glsl_get_array_element(parent->type); - deref->parent = nir_src_for_ssa(&parent->dest.ssa); + deref->parent = nir_src_for_ssa(&parent->def); deref->arr.index = nir_src_for_ssa(index); - nir_def_init(&deref->instr, &deref->dest.ssa, - parent->dest.ssa.num_components, parent->dest.ssa.bit_size); + nir_def_init(&deref->instr, &deref->def, - parent->def.num_components, parent->def.bit_size); nir_builder_instr_insert(build, &deref->instr); @@ -1337,7 +1337,7 @@ nir_build_deref_array_imm(nir_builder *build, nir_deref_instr *parent, int64_t index) { nir_def *idx_ssa = nir_imm_intN_t(build, index, - 
parent->dest.ssa.bit_size); + parent->def.bit_size); return nir_build_deref_array(build, parent, idx_ssa); } @@ -1350,18 +1350,18 @@ nir_build_deref_ptr_as_array(nir_builder *build, nir_deref_instr *parent, parent->deref_type == nir_deref_type_ptr_as_array || parent->deref_type == nir_deref_type_cast); - assert(index->bit_size == parent->dest.ssa.bit_size); + assert(index->bit_size == parent->def.bit_size); nir_deref_instr *deref = nir_deref_instr_create(build->shader, nir_deref_type_ptr_as_array); deref->modes = parent->modes; deref->type = parent->type; - deref->parent = nir_src_for_ssa(&parent->dest.ssa); + deref->parent = nir_src_for_ssa(&parent->def); deref->arr.index = nir_src_for_ssa(index); - nir_def_init(&deref->instr, &deref->dest.ssa, - parent->dest.ssa.num_components, parent->dest.ssa.bit_size); + nir_def_init(&deref->instr, &deref->def, + parent->def.num_components, parent->def.bit_size); nir_builder_instr_insert(build, &deref->instr); @@ -1379,10 +1379,10 @@ nir_build_deref_array_wildcard(nir_builder *build, nir_deref_instr *parent) deref->modes = parent->modes; deref->type = glsl_get_array_element(parent->type); - deref->parent = nir_src_for_ssa(&parent->dest.ssa); + deref->parent = nir_src_for_ssa(&parent->def); - nir_def_init(&deref->instr, &deref->dest.ssa, - parent->dest.ssa.num_components, parent->dest.ssa.bit_size); + nir_def_init(&deref->instr, &deref->def, + parent->def.num_components, parent->def.bit_size); nir_builder_instr_insert(build, &deref->instr); @@ -1400,11 +1400,11 @@ nir_build_deref_struct(nir_builder *build, nir_deref_instr *parent, deref->modes = parent->modes; deref->type = glsl_get_struct_field(parent->type, index); - deref->parent = nir_src_for_ssa(&parent->dest.ssa); + deref->parent = nir_src_for_ssa(&parent->def); deref->strct.index = index; - nir_def_init(&deref->instr, &deref->dest.ssa, - parent->dest.ssa.num_components, parent->dest.ssa.bit_size); + nir_def_init(&deref->instr, &deref->def, + parent->def.num_components, parent->def.bit_size); nir_builder_instr_insert(build, &deref->instr); @@ -1424,7 +1424,7 @@ nir_build_deref_cast(nir_builder *build, nir_def *parent, deref->parent = nir_src_for_ssa(parent); deref->cast.ptr_stride = ptr_stride; - nir_def_init(&deref->instr, &deref->dest.ssa, parent->num_components, + nir_def_init(&deref->instr, &deref->def, parent->num_components, parent->bit_size); nir_builder_instr_insert(build, &deref->instr); @@ -1441,13 +1441,13 @@ nir_alignment_deref_cast(nir_builder *build, nir_deref_instr *parent, deref->modes = parent->modes; deref->type = parent->type; - deref->parent = nir_src_for_ssa(&parent->dest.ssa); + deref->parent = nir_src_for_ssa(&parent->def); deref->cast.ptr_stride = nir_deref_instr_array_stride(deref); deref->cast.align_mul = align_mul; deref->cast.align_offset = align_offset; - nir_def_init(&deref->instr, &deref->dest.ssa, - parent->dest.ssa.num_components, parent->dest.ssa.bit_size); + nir_def_init(&deref->instr, &deref->def, + parent->def.num_components, parent->def.bit_size); nir_builder_instr_insert(build, &deref->instr); @@ -1465,7 +1465,7 @@ nir_build_deref_follower(nir_builder *b, nir_deref_instr *parent, nir_deref_instr *leader) { /* If the derefs would have the same parent, don't make a new one */ - if (leader->parent.ssa == &parent->dest.ssa) + if (leader->parent.ssa == &parent->def) return leader; UNUSED nir_deref_instr *leader_parent = nir_src_as_deref(leader->parent); @@ -1486,7 +1486,7 @@ nir_build_deref_follower(nir_builder *b, nir_deref_instr *parent, if (leader->deref_type 
== nir_deref_type_array) { nir_def *index = nir_i2iN(b, leader->arr.index.ssa, - parent->dest.ssa.bit_size); + parent->def.bit_size); return nir_build_deref_array(b, parent, index); } else { return nir_build_deref_array_wildcard(b, parent); @@ -1509,7 +1509,7 @@ nir_load_deref_with_access(nir_builder *build, nir_deref_instr *deref, enum gl_access_qualifier access) { return nir_build_load_deref(build, glsl_get_vector_elements(deref->type), - glsl_get_bit_size(deref->type), &deref->dest.ssa, + glsl_get_bit_size(deref->type), &deref->def, access); } @@ -1526,7 +1526,7 @@ nir_store_deref_with_access(nir_builder *build, nir_deref_instr *deref, enum gl_access_qualifier access) { writemask &= (1u << value->num_components) - 1u; - nir_build_store_deref(build, &deref->dest.ssa, value, writemask, access); + nir_build_store_deref(build, &deref->def, value, writemask, access); } #undef nir_store_deref @@ -1544,7 +1544,7 @@ nir_copy_deref_with_access(nir_builder *build, nir_deref_instr *dest, enum gl_access_qualifier dest_access, enum gl_access_qualifier src_access) { - nir_build_copy_deref(build, &dest->dest.ssa, &src->dest.ssa, dest_access, src_access); + nir_build_copy_deref(build, &dest->def, &src->def, dest_access, src_access); } #undef nir_copy_deref @@ -1562,7 +1562,7 @@ nir_memcpy_deref_with_access(nir_builder *build, nir_deref_instr *dest, enum gl_access_qualifier dest_access, enum gl_access_qualifier src_access) { - nir_build_memcpy_deref(build, &dest->dest.ssa, &src->dest.ssa, + nir_build_memcpy_deref(build, &dest->def, &src->def, size, dest_access, src_access); } @@ -1640,9 +1640,9 @@ nir_load_global(nir_builder *build, nir_def *addr, unsigned align, load->num_components = num_components; load->src[0] = nir_src_for_ssa(addr); nir_intrinsic_set_align(load, align, 0); - nir_def_init(&load->instr, &load->dest.ssa, num_components, bit_size); + nir_def_init(&load->instr, &load->def, num_components, bit_size); nir_builder_instr_insert(build, &load->instr); - return &load->dest.ssa; + return &load->def; } #undef nir_store_global @@ -1671,9 +1671,9 @@ nir_load_global_constant(nir_builder *build, nir_def *addr, unsigned align, load->num_components = num_components; load->src[0] = nir_src_for_ssa(addr); nir_intrinsic_set_align(load, align, 0); - nir_def_init(&load->instr, &load->dest.ssa, num_components, bit_size); + nir_def_init(&load->instr, &load->def, num_components, bit_size); nir_builder_instr_insert(build, &load->instr); - return &load->dest.ssa; + return &load->def; } #undef nir_load_param @@ -1696,11 +1696,11 @@ nir_decl_reg(nir_builder *b, unsigned num_components, unsigned bit_size, nir_intrinsic_set_bit_size(decl, bit_size); nir_intrinsic_set_num_array_elems(decl, num_array_elems); nir_intrinsic_set_divergent(decl, true); - nir_def_init(&decl->instr, &decl->dest.ssa, 1, 32); + nir_def_init(&decl->instr, &decl->def, 1, 32); nir_instr_insert(nir_before_cf_list(&b->impl->body), &decl->instr); - return &decl->dest.ssa; + return &decl->def; } #undef nir_load_reg @@ -1873,10 +1873,10 @@ nir_load_barycentric(nir_builder *build, nir_intrinsic_op op, { unsigned num_components = op == nir_intrinsic_load_barycentric_model ? 
3 : 2; nir_intrinsic_instr *bary = nir_intrinsic_instr_create(build->shader, op); - nir_def_init(&bary->instr, &bary->dest.ssa, num_components, 32); + nir_def_init(&bary->instr, &bary->def, num_components, 32); nir_intrinsic_set_interp_mode(bary, interp_mode); nir_builder_instr_insert(build, &bary->instr); - return &bary->dest.ssa; + return &bary->def; } static inline void diff --git a/src/compiler/nir/nir_builder_opcodes_h.py b/src/compiler/nir/nir_builder_opcodes_h.py index a21d68e..f815ab9 100644 --- a/src/compiler/nir/nir_builder_opcodes_h.py +++ b/src/compiler/nir/nir_builder_opcodes_h.py @@ -128,9 +128,9 @@ _nir_build_${name}(nir_builder *build${intrinsic_decl_list(opcode)}) % endif % if opcode.has_dest: % if opcode.dest_components == 0: - nir_def_init(&intrin->instr, &intrin->dest.ssa, intrin->num_components, ${get_intrinsic_bitsize(opcode)}); + nir_def_init(&intrin->instr, &intrin->def, intrin->num_components, ${get_intrinsic_bitsize(opcode)}); % else: - nir_def_init(&intrin->instr, &intrin->dest.ssa, ${opcode.dest_components}, ${get_intrinsic_bitsize(opcode)}); + nir_def_init(&intrin->instr, &intrin->def, ${opcode.dest_components}, ${get_intrinsic_bitsize(opcode)}); % endif % endif % for i in range(opcode.num_srcs): @@ -145,7 +145,7 @@ _nir_build_${name}(nir_builder *build${intrinsic_decl_list(opcode)}) indices.align_mul = src${opcode.src_components.index(0)}->bit_size / 8u; % elif ALIGN_MUL in opcode.indices and opcode.dest_components == 0: if (!indices.align_mul) - indices.align_mul = intrin->dest.ssa.bit_size / 8u; + indices.align_mul = intrin->def.bit_size / 8u; % endif % for index in opcode.indices: nir_intrinsic_set_${index.name}(intrin, indices.${index.name}); @@ -153,7 +153,7 @@ _nir_build_${name}(nir_builder *build${intrinsic_decl_list(opcode)}) nir_builder_instr_insert(build, &intrin->instr); % if opcode.has_dest: - return &intrin->dest.ssa; + return &intrin->def; % else: return intrin; % endif diff --git a/src/compiler/nir/nir_builtin_builder.c b/src/compiler/nir/nir_builtin_builder.c index d15c6ae..8f649de 100644 --- a/src/compiler/nir/nir_builtin_builder.c +++ b/src/compiler/nir/nir_builtin_builder.c @@ -373,10 +373,10 @@ nir_get_texture_size(nir_builder *b, nir_tex_instr *tex) /* Add in an LOD because some back-ends require it */ txs->src[idx] = nir_tex_src_for_ssa(nir_tex_src_lod, nir_imm_int(b, 0)); - nir_def_init(&txs->instr, &txs->dest.ssa, nir_tex_instr_dest_size(txs), 32); + nir_def_init(&txs->instr, &txs->def, nir_tex_instr_dest_size(txs), 32); nir_builder_instr_insert(b, &txs->instr); - return &txs->dest.ssa; + return &txs->def; } nir_def * @@ -424,9 +424,9 @@ nir_get_texture_lod(nir_builder *b, nir_tex_instr *tex) } } - nir_def_init(&tql->instr, &tql->dest.ssa, 2, 32); + nir_def_init(&tql->instr, &tql->def, 2, 32); nir_builder_instr_insert(b, &tql->instr); /* The LOD is the y component of the result */ - return nir_channel(b, &tql->dest.ssa, 1); + return nir_channel(b, &tql->def, 1); } diff --git a/src/compiler/nir/nir_clone.c b/src/compiler/nir/nir_clone.c index 50bb697..488d322 100644 --- a/src/compiler/nir/nir_clone.c +++ b/src/compiler/nir/nir_clone.c @@ -246,7 +246,7 @@ clone_deref_instr(clone_state *state, const nir_deref_instr *deref) nir_deref_instr *nderef = nir_deref_instr_create(state->ns, deref->deref_type); - __clone_def(state, &nderef->instr, &nderef->dest.ssa, &deref->dest.ssa); + __clone_def(state, &nderef->instr, &nderef->def, &deref->def); nderef->modes = deref->modes; nderef->type = deref->type; @@ -296,7 +296,7 @@ 
clone_intrinsic(clone_state *state, const nir_intrinsic_instr *itr) unsigned num_srcs = nir_intrinsic_infos[itr->intrinsic].num_srcs; if (nir_intrinsic_infos[itr->intrinsic].has_dest) - __clone_def(state, &nitr->instr, &nitr->dest.ssa, &itr->dest.ssa); + __clone_def(state, &nitr->instr, &nitr->def, &itr->def); nitr->num_components = itr->num_components; memcpy(nitr->const_index, itr->const_index, sizeof(nitr->const_index)); @@ -341,7 +341,7 @@ clone_tex(clone_state *state, const nir_tex_instr *tex) ntex->sampler_dim = tex->sampler_dim; ntex->dest_type = tex->dest_type; ntex->op = tex->op; - __clone_def(state, &ntex->instr, &ntex->dest.ssa, &tex->dest.ssa); + __clone_def(state, &ntex->instr, &ntex->def, &tex->def); for (unsigned i = 0; i < ntex->num_srcs; i++) { ntex->src[i].src_type = tex->src[i].src_type; __clone_src(state, &ntex->instr, &ntex->src[i].src, &tex->src[i].src); @@ -371,7 +371,7 @@ clone_phi(clone_state *state, const nir_phi_instr *phi, nir_block *nblk) { nir_phi_instr *nphi = nir_phi_instr_create(state->ns); - __clone_def(state, &nphi->instr, &nphi->dest.ssa, &phi->dest.ssa); + __clone_def(state, &nphi->instr, &nphi->def, &phi->def); /* Cloning a phi node is a bit different from other instructions. The * sources of phi instructions are the only time where we can use an SSA diff --git a/src/compiler/nir/nir_control_flow.c b/src/compiler/nir/nir_control_flow.c index e657724..9e922a0 100644 --- a/src/compiler/nir/nir_control_flow.c +++ b/src/compiler/nir/nir_control_flow.c @@ -225,8 +225,8 @@ nir_insert_phi_undef(nir_block *block, nir_block *pred) nir_foreach_phi(phi, block) { nir_undef_instr *undef = nir_undef_instr_create(impl->function->shader, - phi->dest.ssa.num_components, - phi->dest.ssa.bit_size); + phi->def.num_components, + phi->def.bit_size); nir_instr_insert_before_cf_list(&impl->body, &undef->instr); nir_phi_src *src = nir_phi_instr_add_src(phi, pred, nir_src_for_ssa(&undef->def)); list_addtail(&src->src.use_link, &undef->def.uses); diff --git a/src/compiler/nir/nir_deref.c b/src/compiler/nir/nir_deref.c index 5293c02..749e98f 100644 --- a/src/compiler/nir/nir_deref.c +++ b/src/compiler/nir/nir_deref.c @@ -35,8 +35,8 @@ is_trivial_deref_cast(nir_deref_instr *cast) return cast->modes == parent->modes && cast->type == parent->type && - cast->dest.ssa.num_components == parent->dest.ssa.num_components && - cast->dest.ssa.bit_size == parent->dest.ssa.bit_size; + cast->def.num_components == parent->def.num_components && + cast->def.bit_size == parent->def.bit_size; } void @@ -109,7 +109,7 @@ nir_deref_instr_remove_if_unused(nir_deref_instr *instr) for (nir_deref_instr *d = instr; d; d = nir_deref_instr_parent(d)) { /* If anyone is using this deref, leave it alone */ - if (!nir_def_is_unused(&d->dest.ssa)) + if (!nir_def_is_unused(&d->def)) break; nir_instr_remove(&d->instr); @@ -156,7 +156,7 @@ bool nir_deref_instr_has_complex_use(nir_deref_instr *deref, nir_deref_instr_has_complex_use_options opts) { - nir_foreach_use_including_if(use_src, &deref->dest.ssa) { + nir_foreach_use_including_if(use_src, &deref->def) { if (use_src->is_if) return true; @@ -346,7 +346,7 @@ nir_build_deref_offset(nir_builder *b, nir_deref_instr *deref, nir_deref_path path; nir_deref_path_init(&path, deref, NULL); - nir_def *offset = nir_imm_intN_t(b, 0, deref->dest.ssa.bit_size); + nir_def *offset = nir_imm_intN_t(b, 0, deref->def.bit_size); for (nir_deref_instr **p = &path.path[1]; *p; p++) { switch ((*p)->deref_type) { case nir_deref_type_array: @@ -774,7 +774,7 @@ 
rematerialize_deref_in_block(nir_deref_instr *deref, nir_deref_instr *parent = nir_src_as_deref(deref->parent); if (parent) { parent = rematerialize_deref_in_block(parent, state); - new_deref->parent = nir_src_for_ssa(&parent->dest.ssa); + new_deref->parent = nir_src_for_ssa(&parent->def); } else { nir_src_copy(&new_deref->parent, &deref->parent, &new_deref->instr); } @@ -806,8 +806,8 @@ rematerialize_deref_in_block(nir_deref_instr *deref, unreachable("Invalid deref instruction type"); } - nir_def_init(&new_deref->instr, &new_deref->dest.ssa, - deref->dest.ssa.num_components, deref->dest.ssa.bit_size); + nir_def_init(&new_deref->instr, &new_deref->def, + deref->def.num_components, deref->def.bit_size); nir_builder_instr_insert(b, &new_deref->instr); return new_deref; @@ -825,7 +825,7 @@ rematerialize_deref_src(nir_src *src, void *_state) nir_deref_instr *block_deref = rematerialize_deref_in_block(deref, state); if (block_deref != deref) { nir_instr_rewrite_src(src->parent_instr, src, - nir_src_for_ssa(&block_deref->dest.ssa)); + nir_src_for_ssa(&block_deref->def)); nir_deref_instr_remove_if_unused(deref); state->progress = true; } @@ -885,7 +885,7 @@ nir_rematerialize_derefs_in_use_blocks_impl(nir_function_impl *impl) static void nir_deref_instr_fixup_child_types(nir_deref_instr *parent) { - nir_foreach_use(use, &parent->dest.ssa) { + nir_foreach_use(use, &parent->def) { if (use->parent_instr->type != nir_instr_type_deref) continue; @@ -1121,8 +1121,8 @@ opt_remove_sampler_cast(nir_deref_instr *cast) /* We're a cast from a more detailed sampler type to a bare sampler or a * texture type with the same dimensionality. */ - nir_def_rewrite_uses(&cast->dest.ssa, - &parent->dest.ssa); + nir_def_rewrite_uses(&cast->def, + &parent->def); nir_instr_remove(&cast->instr); /* Recursively crawl the deref tree and clean up types */ @@ -1169,7 +1169,7 @@ opt_replace_struct_wrapper_cast(nir_builder *b, nir_deref_instr *cast) return false; nir_deref_instr *replace = nir_build_deref_struct(b, parent, 0); - nir_def_rewrite_uses(&cast->dest.ssa, &replace->dest.ssa); + nir_def_rewrite_uses(&cast->def, &replace->def); nir_deref_instr_remove_if_unused(cast); return true; } @@ -1199,7 +1199,7 @@ opt_deref_cast(nir_builder *b, nir_deref_instr *cast) bool trivial_array_cast = is_trivial_array_deref_cast(cast); - nir_foreach_use_including_if_safe(use_src, &cast->dest.ssa) { + nir_foreach_use_including_if_safe(use_src, &cast->def) { assert(!use_src->is_if && "there cannot be if-uses"); /* If this isn't a trivial array cast, we can't propagate into @@ -1242,8 +1242,8 @@ opt_deref_ptr_as_array(nir_builder *b, nir_deref_instr *deref) parent->cast.align_mul == 0 && is_trivial_deref_cast(parent)) parent = nir_deref_instr_parent(parent); - nir_def_rewrite_uses(&deref->dest.ssa, - &parent->dest.ssa); + nir_def_rewrite_uses(&deref->def, + &parent->def); nir_instr_remove(&deref->instr); return true; } @@ -1331,15 +1331,15 @@ opt_load_vec_deref(nir_builder *b, nir_intrinsic_instr *load) { nir_deref_instr *deref = nir_src_as_deref(load->src[0]); nir_component_mask_t read_mask = - nir_def_components_read(&load->dest.ssa); + nir_def_components_read(&load->def); /* LLVM loves take advantage of the fact that vec3s in OpenCL are * vec4-aligned and so it can just read/write them as vec4s. This * results in a LOT of vec4->vec3 casts on loads and stores. 
*/ if (is_vector_bitcast_deref(deref, read_mask, false)) { - const unsigned old_num_comps = load->dest.ssa.num_components; - const unsigned old_bit_size = load->dest.ssa.bit_size; + const unsigned old_num_comps = load->def.num_components; + const unsigned old_bit_size = load->def.bit_size; nir_deref_instr *parent = nir_src_as_deref(deref->parent); const unsigned new_num_comps = glsl_get_vector_elements(parent->type); @@ -1347,18 +1347,18 @@ opt_load_vec_deref(nir_builder *b, nir_intrinsic_instr *load) /* Stomp it to reference the parent */ nir_instr_rewrite_src(&load->instr, &load->src[0], - nir_src_for_ssa(&parent->dest.ssa)); - load->dest.ssa.bit_size = new_bit_size; - load->dest.ssa.num_components = new_num_comps; + nir_src_for_ssa(&parent->def)); + load->def.bit_size = new_bit_size; + load->def.num_components = new_num_comps; load->num_components = new_num_comps; b->cursor = nir_after_instr(&load->instr); - nir_def *data = &load->dest.ssa; + nir_def *data = &load->def; if (old_bit_size != new_bit_size) - data = nir_bitcast_vector(b, &load->dest.ssa, old_bit_size); + data = nir_bitcast_vector(b, &load->def, old_bit_size); data = resize_vector(b, data, old_num_comps); - nir_def_rewrite_uses_after(&load->dest.ssa, data, + nir_def_rewrite_uses_after(&load->def, data, data->parent_instr); return true; } @@ -1386,7 +1386,7 @@ opt_store_vec_deref(nir_builder *b, nir_intrinsic_instr *store) const unsigned new_bit_size = glsl_get_bit_size(parent->type); nir_instr_rewrite_src(&store->instr, &store->src[0], - nir_src_for_ssa(&parent->dest.ssa)); + nir_src_for_ssa(&parent->def)); /* Restrict things down as needed so the bitcast doesn't fail */ data = nir_trim_vector(b, data, util_last_bit(write_mask)); @@ -1426,7 +1426,7 @@ opt_known_deref_mode_is(nir_builder *b, nir_intrinsic_instr *intrin) if (deref_is == NULL) return false; - nir_def_rewrite_uses(&intrin->dest.ssa, deref_is); + nir_def_rewrite_uses(&intrin->def, deref_is); nir_instr_remove(&intrin->instr); return true; } diff --git a/src/compiler/nir/nir_divergence_analysis.c b/src/compiler/nir/nir_divergence_analysis.c index 432c774..b22e2a7 100644 --- a/src/compiler/nir/nir_divergence_analysis.c +++ b/src/compiler/nir/nir_divergence_analysis.c @@ -83,7 +83,7 @@ visit_intrinsic(nir_shader *shader, nir_intrinsic_instr *instr) if (!nir_intrinsic_infos[instr->intrinsic].has_dest) return false; - if (instr->dest.ssa.divergent) + if (instr->def.divergent) return false; nir_divergence_options options = shader->options->divergence_analysis_options; @@ -620,14 +620,14 @@ visit_intrinsic(nir_shader *shader, nir_intrinsic_instr *instr) #endif } - instr->dest.ssa.divergent = is_divergent; + instr->def.divergent = is_divergent; return is_divergent; } static bool visit_tex(nir_tex_instr *instr) { - if (instr->dest.ssa.divergent) + if (instr->def.divergent) return false; bool is_divergent = false; @@ -652,7 +652,7 @@ visit_tex(nir_tex_instr *instr) } } - instr->dest.ssa.divergent = is_divergent; + instr->def.divergent = is_divergent; return is_divergent; } @@ -716,7 +716,7 @@ nir_variable_is_uniform(nir_shader *shader, nir_variable *var) static bool visit_deref(nir_shader *shader, nir_deref_instr *deref) { - if (deref->dest.ssa.divergent) + if (deref->def.divergent) return false; bool is_divergent = false; @@ -738,7 +738,7 @@ visit_deref(nir_shader *shader, nir_deref_instr *deref) break; } - deref->dest.ssa.divergent = is_divergent; + deref->def.divergent = is_divergent; return is_divergent; } @@ -834,14 +834,14 @@ visit_block(nir_block *block, struct 
divergence_state *state) static bool visit_if_merge_phi(nir_phi_instr *phi, bool if_cond_divergent) { - if (phi->dest.ssa.divergent) + if (phi->def.divergent) return false; unsigned defined_srcs = 0; nir_foreach_phi_src(src, phi) { /* if any source value is divergent, the resulting value is divergent */ if (src->src.ssa->divergent) { - phi->dest.ssa.divergent = true; + phi->def.divergent = true; return true; } if (src->src.ssa->parent_instr->type != nir_instr_type_ssa_undef) { @@ -851,7 +851,7 @@ visit_if_merge_phi(nir_phi_instr *phi, bool if_cond_divergent) /* if the condition is divergent and two sources defined, the definition is divergent */ if (defined_srcs > 1 && if_cond_divergent) { - phi->dest.ssa.divergent = true; + phi->def.divergent = true; return true; } @@ -867,14 +867,14 @@ visit_if_merge_phi(nir_phi_instr *phi, bool if_cond_divergent) static bool visit_loop_header_phi(nir_phi_instr *phi, nir_block *preheader, bool divergent_continue) { - if (phi->dest.ssa.divergent) + if (phi->def.divergent) return false; nir_def *same = NULL; nir_foreach_phi_src(src, phi) { /* if any source value is divergent, the resulting value is divergent */ if (src->src.ssa->divergent) { - phi->dest.ssa.divergent = true; + phi->def.divergent = true; return true; } /* if this loop is uniform, we're done here */ @@ -891,7 +891,7 @@ visit_loop_header_phi(nir_phi_instr *phi, nir_block *preheader, bool divergent_c if (!same) same = src->src.ssa; else if (same != src->src.ssa) { - phi->dest.ssa.divergent = true; + phi->def.divergent = true; return true; } } @@ -908,18 +908,18 @@ visit_loop_header_phi(nir_phi_instr *phi, nir_block *preheader, bool divergent_c static bool visit_loop_exit_phi(nir_phi_instr *phi, bool divergent_break) { - if (phi->dest.ssa.divergent) + if (phi->def.divergent) return false; if (divergent_break) { - phi->dest.ssa.divergent = true; + phi->def.divergent = true; return true; } /* if any source value is divergent, the resulting value is divergent */ nir_foreach_phi_src(src, phi) { if (src->src.ssa->divergent) { - phi->dest.ssa.divergent = true; + phi->def.divergent = true; return true; } } @@ -943,7 +943,7 @@ visit_if(nir_if *if_stmt, struct divergence_state *state) /* handle phis after the IF */ nir_foreach_phi(phi, nir_cf_node_cf_tree_next(&if_stmt->cf_node)) { if (state->first_visit) - phi->dest.ssa.divergent = false; + phi->def.divergent = false; progress |= visit_if_merge_phi(phi, if_stmt->condition.ssa->divergent); } @@ -972,16 +972,16 @@ visit_loop(nir_loop *loop, struct divergence_state *state) /* handle loop header phis first: we have no knowledge yet about * the loop's control flow or any loop-carried sources. 
*/ nir_foreach_phi(phi, loop_header) { - if (!state->first_visit && phi->dest.ssa.divergent) + if (!state->first_visit && phi->def.divergent) continue; nir_foreach_phi_src(src, phi) { if (src->pred == loop_preheader) { - phi->dest.ssa.divergent = src->src.ssa->divergent; + phi->def.divergent = src->src.ssa->divergent; break; } } - progress |= phi->dest.ssa.divergent; + progress |= phi->def.divergent; } /* setup loop state */ @@ -1009,7 +1009,7 @@ visit_loop(nir_loop *loop, struct divergence_state *state) /* handle phis after the loop */ nir_foreach_phi(phi, nir_cf_node_cf_tree_next(&loop->cf_node)) { if (state->first_visit) - phi->dest.ssa.divergent = false; + phi->def.divergent = false; progress |= visit_loop_exit_phi(phi, loop_state.divergent_loop_break); } diff --git a/src/compiler/nir/nir_from_ssa.c b/src/compiler/nir/nir_from_ssa.c index 0084b00..a3eaca9 100644 --- a/src/compiler/nir/nir_from_ssa.c +++ b/src/compiler/nir/nir_from_ssa.c @@ -401,31 +401,31 @@ isolate_phi_nodes_block(nir_shader *shader, nir_block *block, void *dead_ctx) nir_parallel_copy_entry); entry->src_is_reg = false; entry->dest_is_reg = false; - nir_def_init(&pcopy->instr, &entry->dest.dest.ssa, - phi->dest.ssa.num_components, phi->dest.ssa.bit_size); - entry->dest.dest.ssa.divergent = nir_src_is_divergent(src->src); + nir_def_init(&pcopy->instr, &entry->dest.def, + phi->def.num_components, phi->def.bit_size); + entry->dest.def.divergent = nir_src_is_divergent(src->src); exec_list_push_tail(&pcopy->entries, &entry->node); nir_instr_rewrite_src(&pcopy->instr, &entry->src, src->src); nir_instr_rewrite_src(&phi->instr, &src->src, - nir_src_for_ssa(&entry->dest.dest.ssa)); + nir_src_for_ssa(&entry->dest.def)); } nir_parallel_copy_entry *entry = rzalloc(dead_ctx, nir_parallel_copy_entry); entry->src_is_reg = false; entry->dest_is_reg = false; - nir_def_init(&block_pcopy->instr, &entry->dest.dest.ssa, - phi->dest.ssa.num_components, phi->dest.ssa.bit_size); - entry->dest.dest.ssa.divergent = phi->dest.ssa.divergent; + nir_def_init(&block_pcopy->instr, &entry->dest.def, + phi->def.num_components, phi->def.bit_size); + entry->dest.def.divergent = phi->def.divergent; exec_list_push_tail(&block_pcopy->entries, &entry->node); - nir_def_rewrite_uses(&phi->dest.ssa, - &entry->dest.dest.ssa); + nir_def_rewrite_uses(&phi->def, + &entry->dest.def); nir_instr_rewrite_src(&block_pcopy->instr, &entry->src, - nir_src_for_ssa(&phi->dest.ssa)); + nir_src_for_ssa(&phi->def)); } return true; @@ -435,7 +435,7 @@ static bool coalesce_phi_nodes_block(nir_block *block, struct from_ssa_state *state) { nir_foreach_phi(phi, block) { - merge_node *dest_node = get_merge_node(&phi->dest.ssa, state); + merge_node *dest_node = get_merge_node(&phi->def, state); nir_foreach_phi_src(src, phi) { if (nir_src_is_undef(src->src)) @@ -457,7 +457,7 @@ aggressive_coalesce_parallel_copy(nir_parallel_copy_instr *pcopy, nir_foreach_parallel_copy_entry(entry, pcopy) { assert(!entry->src_is_reg); assert(!entry->dest_is_reg); - assert(entry->dest.dest.ssa.num_components == + assert(entry->dest.def.num_components == entry->src.ssa->num_components); /* Since load_const instructions are SSA only, we can't replace their @@ -467,7 +467,7 @@ aggressive_coalesce_parallel_copy(nir_parallel_copy_instr *pcopy, continue; merge_node *src_node = get_merge_node(entry->src.ssa, state); - merge_node *dest_node = get_merge_node(&entry->dest.dest.ssa, state); + merge_node *dest_node = get_merge_node(&entry->dest.def, state); if (src_node->set == dest_node->set) continue; @@ -557,7 
+557,7 @@ nir_rewrite_uses_to_load_reg(nir_builder *b, nir_def *old, if (intr->intrinsic == nir_intrinsic_load_reg && intr->src[0].ssa == reg && nir_intrinsic_base(intr) == 0) - load = &intr->dest.ssa; + load = &intr->def; } } @@ -621,7 +621,7 @@ remove_no_op_phi(nir_instr *instr, struct from_ssa_state *state) nir_phi_instr *phi = nir_instr_as_phi(instr); struct hash_entry *entry = - _mesa_hash_table_search(state->merge_node_table, &phi->dest.ssa); + _mesa_hash_table_search(state->merge_node_table, &phi->def); assert(entry != NULL); merge_node *node = (merge_node *)entry->data; @@ -716,10 +716,10 @@ resolve_registers_impl(nir_function_impl *impl, struct from_ssa_state *state) nir_foreach_parallel_copy_entry(entry, pcopy) { assert(!entry->dest_is_reg); - assert(nir_def_is_unused(&entry->dest.dest.ssa)); + assert(nir_def_is_unused(&entry->dest.def)); /* Parallel copy destinations will always be registers */ - nir_def *reg = reg_for_ssa_def(&entry->dest.dest.ssa, state); + nir_def *reg = reg_for_ssa_def(&entry->dest.def, state); assert(reg != NULL); entry->dest_is_reg = true; @@ -1157,10 +1157,10 @@ nir_lower_phis_to_regs_block(nir_block *block) bool progress = false; nir_foreach_phi_safe(phi, block) { - nir_def *reg = decl_reg_for_ssa_def(&b, &phi->dest.ssa); + nir_def *reg = decl_reg_for_ssa_def(&b, &phi->def); b.cursor = nir_after_instr(&phi->instr); - nir_def_rewrite_uses(&phi->dest.ssa, nir_load_reg(&b, reg)); + nir_def_rewrite_uses(&phi->def, nir_load_reg(&b, reg)); nir_foreach_phi_src(src, phi) { diff --git a/src/compiler/nir/nir_gather_types.c b/src/compiler/nir/nir_gather_types.c index 50c7fe5..1516640 100644 --- a/src/compiler/nir/nir_gather_types.c +++ b/src/compiler/nir/nir_gather_types.c @@ -152,7 +152,7 @@ nir_gather_types(nir_function_impl *impl, nir_tex_instr_src_type(tex, i), float_types, int_types, &progress); } - set_type(tex->dest.ssa.index, tex->dest_type, + set_type(tex->def.index, tex->dest_type, float_types, int_types, &progress); break; } @@ -162,7 +162,7 @@ nir_gather_types(nir_function_impl *impl, nir_alu_type dest_type = nir_intrinsic_instr_dest_type(intrin); if (dest_type != nir_type_invalid) { - set_type(intrin->dest.ssa.index, dest_type, + set_type(intrin->def.index, dest_type, float_types, int_types, &progress); } @@ -180,7 +180,7 @@ nir_gather_types(nir_function_impl *impl, case nir_instr_type_phi: { nir_phi_instr *phi = nir_instr_as_phi(instr); nir_foreach_phi_src(src, phi) { - copy_types(src->src, &phi->dest.ssa, + copy_types(src->src, &phi->def, float_types, int_types, &progress); } break; diff --git a/src/compiler/nir/nir_inline_functions.c b/src/compiler/nir/nir_inline_functions.c index ac4ae09..ee88d94 100644 --- a/src/compiler/nir/nir_inline_functions.c +++ b/src/compiler/nir/nir_inline_functions.c @@ -84,7 +84,7 @@ nir_inline_function_impl(struct nir_builder *b, unsigned param_idx = nir_intrinsic_param_idx(load); assert(param_idx < impl->function->num_params); - nir_def_rewrite_uses(&load->dest.ssa, + nir_def_rewrite_uses(&load->def, params[param_idx]); /* Remove any left-over load_param intrinsics because they're soon diff --git a/src/compiler/nir/nir_inline_helpers.h b/src/compiler/nir/nir_inline_helpers.h index bec7c65..d05b9a7 100644 --- a/src/compiler/nir/nir_inline_helpers.h +++ b/src/compiler/nir/nir_inline_helpers.h @@ -8,20 +8,20 @@ _nir_foreach_def(nir_instr *instr, nir_foreach_def_cb cb, void *state) case nir_instr_type_alu: return cb(&nir_instr_as_alu(instr)->def, state); case nir_instr_type_deref: - return 
cb(&nir_instr_as_deref(instr)->dest.ssa, state); + return cb(&nir_instr_as_deref(instr)->def, state); case nir_instr_type_intrinsic: { nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr); if (nir_intrinsic_infos[intrin->intrinsic].has_dest) - return cb(&intrin->dest.ssa, state); + return cb(&intrin->def, state); return true; } case nir_instr_type_tex: - return cb(&nir_instr_as_tex(instr)->dest.ssa, state); + return cb(&nir_instr_as_tex(instr)->def, state); case nir_instr_type_phi: - return cb(&nir_instr_as_phi(instr)->dest.ssa, state); + return cb(&nir_instr_as_phi(instr)->def, state); case nir_instr_type_parallel_copy: { nir_foreach_parallel_copy_entry(entry, nir_instr_as_parallel_copy(instr)) { - if (!entry->dest_is_reg && !cb(&entry->dest.dest.ssa, state)) + if (!entry->dest_is_reg && !cb(&entry->dest.def, state)) return false; } return true; diff --git a/src/compiler/nir/nir_inline_uniforms.c b/src/compiler/nir/nir_inline_uniforms.c index ddcd279..1cc17f2 100644 --- a/src/compiler/nir/nir_inline_uniforms.c +++ b/src/compiler/nir/nir_inline_uniforms.c @@ -128,7 +128,7 @@ nir_collect_src_uniforms(const nir_src *src, int component, nir_src_is_const(intr->src[1]) && nir_src_as_uint(intr->src[1]) <= max_offset && /* TODO: Can't handle other bit sizes for now. */ - intr->dest.ssa.bit_size == 32) { + intr->def.bit_size == 32) { /* num_offsets can be NULL if-and-only-if uni_offsets is NULL. */ assert((num_offsets == NULL) == (uni_offsets == NULL)); @@ -399,8 +399,8 @@ nir_inline_uniforms(nir_shader *shader, unsigned num_uniforms, nir_src_as_uint(intr->src[0]) == 0 && nir_src_is_const(intr->src[1]) && /* TODO: Can't handle other bit sizes for now. */ - intr->dest.ssa.bit_size == 32) { - int num_components = intr->dest.ssa.num_components; + intr->def.bit_size == 32) { + int num_components = intr->def.num_components; uint32_t offset = nir_src_as_uint(intr->src[1]) / 4; if (num_components == 1) { @@ -409,7 +409,7 @@ nir_inline_uniforms(nir_shader *shader, unsigned num_uniforms, if (offset == uniform_dw_offsets[i]) { b.cursor = nir_before_instr(&intr->instr); nir_def *def = nir_imm_int(&b, uniform_values[i]); - nir_def_rewrite_uses(&intr->dest.ssa, def); + nir_def_rewrite_uses(&intr->def, def); nir_instr_remove(&intr->instr); break; } @@ -441,7 +441,7 @@ nir_inline_uniforms(nir_shader *shader, unsigned num_uniforms, for (unsigned i = 0; i < num_components; i++) { if (!components[i]) { uint32_t scalar_offset = (offset + i) * 4; - components[i] = nir_load_ubo(&b, 1, intr->dest.ssa.bit_size, + components[i] = nir_load_ubo(&b, 1, intr->def.bit_size, intr->src[0].ssa, nir_imm_int(&b, scalar_offset)); nir_intrinsic_instr *load = @@ -453,7 +453,7 @@ nir_inline_uniforms(nir_shader *shader, unsigned num_uniforms, } /* Replace the original uniform load. 
*/ - nir_def_rewrite_uses(&intr->dest.ssa, + nir_def_rewrite_uses(&intr->def, nir_vec(&b, components, num_components)); nir_instr_remove(&intr->instr); } diff --git a/src/compiler/nir/nir_instr_set.c b/src/compiler/nir/nir_instr_set.c index e0d2da7..c87992f 100644 --- a/src/compiler/nir/nir_instr_set.c +++ b/src/compiler/nir/nir_instr_set.c @@ -213,8 +213,8 @@ hash_intrinsic(uint32_t hash, const nir_intrinsic_instr *instr) hash = HASH(hash, instr->intrinsic); if (info->has_dest) { - hash = HASH(hash, instr->dest.ssa.num_components); - hash = HASH(hash, instr->dest.ssa.bit_size); + hash = HASH(hash, instr->def.num_components); + hash = HASH(hash, instr->def.bit_size); } hash = XXH32(instr->const_index, info->num_indices * sizeof(instr->const_index[0]), hash); @@ -653,9 +653,9 @@ nir_instrs_equal(const nir_instr *instr1, const nir_instr *instr2) /* In case of phis with no sources, the dest needs to be checked * to ensure that phis with incompatible dests won't get merged * during CSE. */ - if (phi1->dest.ssa.num_components != phi2->dest.ssa.num_components) + if (phi1->def.num_components != phi2->def.num_components) return false; - if (phi1->dest.ssa.bit_size != phi2->dest.ssa.bit_size) + if (phi1->def.bit_size != phi2->def.bit_size) return false; nir_foreach_phi_src(src1, phi1) { @@ -681,12 +681,12 @@ nir_instrs_equal(const nir_instr *instr1, const nir_instr *instr2) intrinsic1->num_components != intrinsic2->num_components) return false; - if (info->has_dest && intrinsic1->dest.ssa.num_components != - intrinsic2->dest.ssa.num_components) + if (info->has_dest && intrinsic1->def.num_components != + intrinsic2->def.num_components) return false; - if (info->has_dest && intrinsic1->dest.ssa.bit_size != - intrinsic2->dest.ssa.bit_size) + if (info->has_dest && intrinsic1->def.bit_size != + intrinsic2->def.bit_size) return false; for (unsigned i = 0; i < info->num_srcs; i++) { @@ -713,21 +713,21 @@ nir_instrs_equal(const nir_instr *instr1, const nir_instr *instr2) } static nir_def * -nir_instr_get_dest_ssa_def(nir_instr *instr) +nir_instr_get_def_def(nir_instr *instr) { switch (instr->type) { case nir_instr_type_alu: return &nir_instr_as_alu(instr)->def; case nir_instr_type_deref: - return &nir_instr_as_deref(instr)->dest.ssa; + return &nir_instr_as_deref(instr)->def; case nir_instr_type_load_const: return &nir_instr_as_load_const(instr)->def; case nir_instr_type_phi: - return &nir_instr_as_phi(instr)->dest.ssa; + return &nir_instr_as_phi(instr)->def; case nir_instr_type_intrinsic: - return &nir_instr_as_intrinsic(instr)->dest.ssa; + return &nir_instr_as_intrinsic(instr)->def; case nir_instr_type_tex: - return &nir_instr_as_tex(instr)->dest.ssa; + return &nir_instr_as_tex(instr)->def; default: unreachable("We never ask for any of these"); } @@ -766,8 +766,8 @@ nir_instr_set_add_or_rewrite(struct set *instr_set, nir_instr *instr, if (!cond_function || cond_function(match, instr)) { /* rewrite instruction if condition is matched */ - nir_def *def = nir_instr_get_dest_ssa_def(instr); - nir_def *new_def = nir_instr_get_dest_ssa_def(match); + nir_def *def = nir_instr_get_def_def(instr); + nir_def *new_def = nir_instr_get_def_def(match); /* It's safe to replace an exact instruction with an inexact one as * long as we make it exact. 
If we got here, the two instructions are diff --git a/src/compiler/nir/nir_legacy.c b/src/compiler/nir/nir_legacy.c index fb5c40c..77a8fed 100644 --- a/src/compiler/nir/nir_legacy.c +++ b/src/compiler/nir/nir_legacy.c @@ -293,7 +293,7 @@ fuse_mods_with_registers(nir_builder *b, nir_instr *instr, void *fuse_fabs_) assert(!use->is_if); assert(use->parent_instr->type == nir_instr_type_alu); nir_alu_src *alu_use = list_entry(use, nir_alu_src, src); - nir_src_rewrite(&alu_use->src, &load->dest.ssa); + nir_src_rewrite(&alu_use->src, &load->def); for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; ++i) alu_use->swizzle[i] = alu->src[0].swizzle[alu_use->swizzle[i]]; } diff --git a/src/compiler/nir/nir_linking_helpers.c b/src/compiler/nir/nir_linking_helpers.c index 8ac46f0..0d37738 100644 --- a/src/compiler/nir/nir_linking_helpers.c +++ b/src/compiler/nir/nir_linking_helpers.c @@ -1074,10 +1074,10 @@ replace_varying_input_by_constant_load(nir_shader *shader, /* Add new const to replace the input */ nir_def *nconst = nir_build_imm(&b, store_intr->num_components, - intr->dest.ssa.bit_size, + intr->def.bit_size, out_const->value); - nir_def_rewrite_uses(&intr->dest.ssa, nconst); + nir_def_rewrite_uses(&intr->def, nconst); progress = true; } @@ -1123,7 +1123,7 @@ replace_duplicate_input(nir_shader *shader, nir_variable *input_var, b.cursor = nir_before_instr(instr); nir_def *load = nir_load_var(&b, input_var); - nir_def_rewrite_uses(&intr->dest.ssa, load); + nir_def_rewrite_uses(&intr->def, load); progress = true; } @@ -1210,7 +1210,7 @@ clone_deref_instr(nir_builder *b, nir_variable *var, nir_deref_instr *deref) nir_load_const_instr *index = nir_instr_as_load_const(deref->arr.index.ssa->parent_instr); nir_def *ssa = nir_imm_intN_t(b, index->value->i64, - parent->dest.ssa.bit_size); + parent->def.bit_size); return nir_build_deref_ptr_as_array(b, parent, ssa); } case nir_deref_type_struct: @@ -1271,7 +1271,7 @@ replace_varying_input_by_uniform_load(nir_shader *shader, } /* Replace load input with load uniform. */ - nir_def_rewrite_uses(&intr->dest.ssa, uni_def); + nir_def_rewrite_uses(&intr->def, uni_def); progress = true; } diff --git a/src/compiler/nir/nir_liveness.c b/src/compiler/nir/nir_liveness.c index 221c143..2814e53 100644 --- a/src/compiler/nir/nir_liveness.c +++ b/src/compiler/nir/nir_liveness.c @@ -106,7 +106,7 @@ propagate_across_edge(nir_block *pred, nir_block *succ, memcpy(live, succ->live_in, state->bitset_words * sizeof *live); nir_foreach_phi(phi, succ) { - set_ssa_def_dead(&phi->dest.ssa, live); + set_ssa_def_dead(&phi->def, live); } nir_foreach_phi(phi, succ) { diff --git a/src/compiler/nir/nir_loop_analyze.c b/src/compiler/nir/nir_loop_analyze.c index 0b9a2e0..6b1a1ac 100644 --- a/src/compiler/nir/nir_loop_analyze.c +++ b/src/compiler/nir/nir_loop_analyze.c @@ -460,7 +460,7 @@ compute_induction_information(loop_info_state *state) /* Is one of the operands const or uniform, and the other the phi. * The phi source can't be swizzled in any way. 
*/ - if (alu->src[1 - i].src.ssa == &phi->dest.ssa && + if (alu->src[1 - i].src.ssa == &phi->def && alu_src_has_identity_swizzle(alu, 1 - i)) { if (is_only_uniform_src(&alu->src[i].src)) var->update_src = alu->src + i; diff --git a/src/compiler/nir/nir_lower_array_deref_of_vec.c b/src/compiler/nir/nir_lower_array_deref_of_vec.c index 08a8d65..161e948 100644 --- a/src/compiler/nir/nir_lower_array_deref_of_vec.c +++ b/src/compiler/nir/nir_lower_array_deref_of_vec.c @@ -140,19 +140,19 @@ nir_lower_array_deref_of_vec_impl(nir_function_impl *impl, /* Turn the load into a vector load */ nir_instr_rewrite_src(&intrin->instr, &intrin->src[0], - nir_src_for_ssa(&vec_deref->dest.ssa)); - intrin->dest.ssa.num_components = num_components; + nir_src_for_ssa(&vec_deref->def)); + intrin->def.num_components = num_components; intrin->num_components = num_components; nir_def *index = nir_ssa_for_src(&b, deref->arr.index, 1); nir_def *scalar = - nir_vector_extract(&b, &intrin->dest.ssa, index); + nir_vector_extract(&b, &intrin->def, index); if (scalar->parent_instr->type == nir_instr_type_ssa_undef) { - nir_def_rewrite_uses(&intrin->dest.ssa, + nir_def_rewrite_uses(&intrin->def, scalar); nir_instr_remove(&intrin->instr); } else { - nir_def_rewrite_uses_after(&intrin->dest.ssa, + nir_def_rewrite_uses_after(&intrin->def, scalar, scalar->parent_instr); } diff --git a/src/compiler/nir/nir_lower_atomics_to_ssbo.c b/src/compiler/nir/nir_lower_atomics_to_ssbo.c index 9a8f62f..6922960 100644 --- a/src/compiler/nir/nir_lower_atomics_to_ssbo.c +++ b/src/compiler/nir/nir_lower_atomics_to_ssbo.c @@ -153,20 +153,20 @@ lower_instr(nir_intrinsic_instr *instr, unsigned ssbo_offset, nir_builder *b, un * num_components with one that has variable number. So * best to take this from the dest: */ - new_instr->num_components = instr->dest.ssa.num_components; + new_instr->num_components = instr->def.num_components; } - nir_def_init(&new_instr->instr, &new_instr->dest.ssa, - instr->dest.ssa.num_components, instr->dest.ssa.bit_size); + nir_def_init(&new_instr->instr, &new_instr->def, + instr->def.num_components, instr->def.bit_size); nir_instr_insert_before(&instr->instr, &new_instr->instr); nir_instr_remove(&instr->instr); if (instr->intrinsic == nir_intrinsic_atomic_counter_pre_dec) { b->cursor = nir_after_instr(&new_instr->instr); - nir_def *result = nir_iadd(b, &new_instr->dest.ssa, temp); - nir_def_rewrite_uses(&instr->dest.ssa, result); + nir_def *result = nir_iadd(b, &new_instr->def, temp); + nir_def_rewrite_uses(&instr->def, result); } else { - nir_def_rewrite_uses(&instr->dest.ssa, &new_instr->dest.ssa); + nir_def_rewrite_uses(&instr->def, &new_instr->def); } return true; diff --git a/src/compiler/nir/nir_lower_bit_size.c b/src/compiler/nir/nir_lower_bit_size.c index 5dc6437..eb2d932 100644 --- a/src/compiler/nir/nir_lower_bit_size.c +++ b/src/compiler/nir/nir_lower_bit_size.c @@ -146,7 +146,7 @@ lower_intrinsic_instr(nir_builder *b, nir_intrinsic_instr *intrin, case nir_intrinsic_reduce: case nir_intrinsic_inclusive_scan: case nir_intrinsic_exclusive_scan: { - const unsigned old_bit_size = intrin->dest.ssa.bit_size; + const unsigned old_bit_size = intrin->def.bit_size; assert(old_bit_size < bit_size); nir_alu_type type = nir_type_uint; @@ -166,18 +166,18 @@ lower_intrinsic_instr(nir_builder *b, nir_intrinsic_instr *intrin, if (intrin->intrinsic == nir_intrinsic_vote_feq || intrin->intrinsic == nir_intrinsic_vote_ieq) { /* These return a Boolean; it's always 1-bit */ - assert(new_intrin->dest.ssa.bit_size == 1); + 
assert(new_intrin->def.bit_size == 1); } else { /* These return the same bit size as the source; we need to adjust * the size and then we'll have to emit a down-cast. */ - assert(intrin->src[0].ssa->bit_size == intrin->dest.ssa.bit_size); - new_intrin->dest.ssa.bit_size = bit_size; + assert(intrin->src[0].ssa->bit_size == intrin->def.bit_size); + new_intrin->def.bit_size = bit_size; } nir_builder_instr_insert(b, &new_intrin->instr); - nir_def *res = &new_intrin->dest.ssa; + nir_def *res = &new_intrin->def; if (intrin->intrinsic == nir_intrinsic_exclusive_scan) { /* For exclusive scan, we have to be careful because the identity * value for the higher bit size may get added into the mix by @@ -205,7 +205,7 @@ lower_intrinsic_instr(nir_builder *b, nir_intrinsic_instr *intrin, intrin->intrinsic != nir_intrinsic_vote_ieq) res = nir_u2uN(b, res, old_bit_size); - nir_def_rewrite_uses(&intrin->dest.ssa, res); + nir_def_rewrite_uses(&intrin->def, res); break; } @@ -218,7 +218,7 @@ static void lower_phi_instr(nir_builder *b, nir_phi_instr *phi, unsigned bit_size, nir_phi_instr *last_phi) { - unsigned old_bit_size = phi->dest.ssa.bit_size; + unsigned old_bit_size = phi->def.bit_size; assert(old_bit_size < bit_size); nir_foreach_phi_src(src, phi) { @@ -228,12 +228,12 @@ lower_phi_instr(nir_builder *b, nir_phi_instr *phi, unsigned bit_size, nir_instr_rewrite_src(&phi->instr, &src->src, nir_src_for_ssa(new_src)); } - phi->dest.ssa.bit_size = bit_size; + phi->def.bit_size = bit_size; b->cursor = nir_after_instr(&last_phi->instr); - nir_def *new_dest = nir_u2uN(b, &phi->dest.ssa, old_bit_size); - nir_def_rewrite_uses_after(&phi->dest.ssa, new_dest, + nir_def *new_dest = nir_u2uN(b, &phi->def, old_bit_size); + nir_def_rewrite_uses_after(&phi->def, new_dest, new_dest->parent_instr); } @@ -307,8 +307,8 @@ split_phi(nir_builder *b, nir_phi_instr *phi) nir_phi_instr_create(b->shader), nir_phi_instr_create(b->shader) }; - int num_components = phi->dest.ssa.num_components; - assert(phi->dest.ssa.bit_size == 64); + int num_components = phi->def.num_components; + assert(phi->def.bit_size == 64); nir_foreach_phi_src(src, phi) { assert(num_components == src->src.ssa->num_components); @@ -322,16 +322,16 @@ split_phi(nir_builder *b, nir_phi_instr *phi) nir_phi_instr_add_src(lowered[1], src->pred, nir_src_for_ssa(y)); } - nir_def_init(&lowered[0]->instr, &lowered[0]->dest.ssa, num_components, 32); - nir_def_init(&lowered[1]->instr, &lowered[1]->dest.ssa, num_components, 32); + nir_def_init(&lowered[0]->instr, &lowered[0]->def, num_components, 32); + nir_def_init(&lowered[1]->instr, &lowered[1]->def, num_components, 32); b->cursor = nir_before_instr(&phi->instr); nir_builder_instr_insert(b, &lowered[0]->instr); nir_builder_instr_insert(b, &lowered[1]->instr); b->cursor = nir_after_phis(nir_cursor_current_block(b->cursor)); - nir_def *merged = nir_pack_64_2x32_split(b, &lowered[0]->dest.ssa, &lowered[1]->dest.ssa); - nir_def_rewrite_uses(&phi->dest.ssa, merged); + nir_def *merged = nir_pack_64_2x32_split(b, &lowered[0]->def, &lowered[1]->def); + nir_def_rewrite_uses(&phi->def, merged); nir_instr_remove(&phi->instr); } @@ -343,7 +343,7 @@ lower_64bit_phi_instr(nir_builder *b, nir_instr *instr, UNUSED void *cb_data) nir_phi_instr *phi = nir_instr_as_phi(instr); - if (phi->dest.ssa.bit_size <= 32) + if (phi->def.bit_size <= 32) return false; split_phi(b, phi); diff --git a/src/compiler/nir/nir_lower_bitmap.c b/src/compiler/nir/nir_lower_bitmap.c index 14bf6a6..a70e981 100644 --- a/src/compiler/nir/nir_lower_bitmap.c +++ 
b/src/compiler/nir/nir_lower_bitmap.c @@ -80,17 +80,17 @@ lower_bitmap(nir_shader *shader, nir_builder *b, tex->coord_components = 2; tex->dest_type = nir_type_float32; tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_deref, - &tex_deref->dest.ssa); + &tex_deref->def); tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref, - &tex_deref->dest.ssa); + &tex_deref->def); tex->src[2] = nir_tex_src_for_ssa(nir_tex_src_coord, nir_trim_vector(b, texcoord, tex->coord_components)); - nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32); + nir_def_init(&tex->instr, &tex->def, 4, 32); nir_builder_instr_insert(b, &tex->instr); /* kill if tex != 0.0.. take .x or .w channel according to format: */ - cond = nir_fneu_imm(b, nir_channel(b, &tex->dest.ssa, options->swizzle_xxxx ? 0 : 3), + cond = nir_fneu_imm(b, nir_channel(b, &tex->def, options->swizzle_xxxx ? 0 : 3), 0.0); nir_discard_if(b, cond); diff --git a/src/compiler/nir/nir_lower_bool_to_bitsize.c b/src/compiler/nir/nir_lower_bool_to_bitsize.c index 730d12e..96fe030 100644 --- a/src/compiler/nir/nir_lower_bool_to_bitsize.c +++ b/src/compiler/nir/nir_lower_bool_to_bitsize.c @@ -340,7 +340,7 @@ lower_load_const_instr(nir_load_const_instr *load) static bool lower_phi_instr(nir_builder *b, nir_phi_instr *phi) { - if (phi->dest.ssa.bit_size != 1) + if (phi->def.bit_size != 1) return false; /* Ensure all phi sources have a canonical bit-size. We choose the @@ -363,7 +363,7 @@ lower_phi_instr(nir_builder *b, nir_phi_instr *phi) } } - phi->dest.ssa.bit_size = dst_bit_size; + phi->def.bit_size = dst_bit_size; return true; } @@ -372,7 +372,7 @@ static bool lower_tex_instr(nir_tex_instr *tex) { bool progress = false; - rewrite_1bit_ssa_def_to_32bit(&tex->dest.ssa, &progress); + rewrite_1bit_ssa_def_to_32bit(&tex->def, &progress); if (tex->dest_type == nir_type_bool1) { tex->dest_type = nir_type_bool32; progress = true; diff --git a/src/compiler/nir/nir_lower_bool_to_float.c b/src/compiler/nir/nir_lower_bool_to_float.c index f221679..a45bcc0 100644 --- a/src/compiler/nir/nir_lower_bool_to_float.c +++ b/src/compiler/nir/nir_lower_bool_to_float.c @@ -198,7 +198,7 @@ static bool lower_tex_instr(nir_tex_instr *tex) { bool progress = false; - rewrite_1bit_ssa_def_to_32bit(&tex->dest.ssa, &progress); + rewrite_1bit_ssa_def_to_32bit(&tex->def, &progress); if (tex->dest_type == nir_type_bool1) { tex->dest_type = nir_type_bool32; progress = true; diff --git a/src/compiler/nir/nir_lower_bool_to_int32.c b/src/compiler/nir/nir_lower_bool_to_int32.c index 4138380..f7acf3d 100644 --- a/src/compiler/nir/nir_lower_bool_to_int32.c +++ b/src/compiler/nir/nir_lower_bool_to_int32.c @@ -166,7 +166,7 @@ static bool lower_tex_instr(nir_tex_instr *tex) { bool progress = false; - rewrite_1bit_ssa_def_to_32bit(&tex->dest.ssa, &progress); + rewrite_1bit_ssa_def_to_32bit(&tex->def, &progress); if (tex->dest_type == nir_type_bool1) { tex->dest_type = nir_type_bool32; progress = true; diff --git a/src/compiler/nir/nir_lower_cl_images.c b/src/compiler/nir/nir_lower_cl_images.c index 14e953e..016c45f 100644 --- a/src/compiler/nir/nir_lower_cl_images.c +++ b/src/compiler/nir/nir_lower_cl_images.c @@ -188,8 +188,8 @@ nir_lower_cl_images(nir_shader *shader, bool lower_image_derefs, bool lower_samp b.cursor = nir_instr_remove(&deref->instr); nir_def *loc = nir_imm_intN_t(&b, deref->var->data.driver_location, - deref->dest.ssa.bit_size); - nir_def_rewrite_uses(&deref->dest.ssa, loc); + deref->def.bit_size); + nir_def_rewrite_uses(&deref->def, loc); progress = true; break; } diff --git 
a/src/compiler/nir/nir_lower_const_arrays_to_uniforms.c b/src/compiler/nir/nir_lower_const_arrays_to_uniforms.c
index b80ba5c..7a292b3 100644
--- a/src/compiler/nir/nir_lower_const_arrays_to_uniforms.c
+++ b/src/compiler/nir/nir_lower_const_arrays_to_uniforms.c
@@ -402,7 +402,7 @@ nir_lower_const_arrays_to_uniforms(nir_shader *shader,
          nir_def *new_def = nir_load_deref(&b, new_deref_instr);
-         nir_def_rewrite_uses(&intrin->dest.ssa, new_def);
+         nir_def_rewrite_uses(&intrin->def, new_def);
          nir_instr_remove(&intrin->instr);
       }
    }
diff --git a/src/compiler/nir/nir_lower_convert_alu_types.c b/src/compiler/nir/nir_lower_convert_alu_types.c
index 14f3561..140df2b 100644
--- a/src/compiler/nir/nir_lower_convert_alu_types.c
+++ b/src/compiler/nir/nir_lower_convert_alu_types.c
@@ -62,7 +62,7 @@ lower_convert_alu_types_instr(nir_builder *b, nir_intrinsic_instr *conv)
                     nir_intrinsic_dest_type(conv),
                     nir_intrinsic_rounding_mode(conv),
                     nir_intrinsic_saturate(conv));
-   nir_def_rewrite_uses(&conv->dest.ssa, val);
+   nir_def_rewrite_uses(&conv->def, val);
 }
 static bool
diff --git a/src/compiler/nir/nir_lower_discard_or_demote.c b/src/compiler/nir/nir_lower_discard_or_demote.c
index 4553134..d9a4cf6 100644
--- a/src/compiler/nir/nir_lower_discard_or_demote.c
+++ b/src/compiler/nir/nir_lower_discard_or_demote.c
@@ -67,7 +67,7 @@ nir_lower_demote_to_discard_instr(nir_builder *b, nir_instr *instr, void *data)
     * we can assume there are none */
    b->cursor = nir_before_instr(instr);
    nir_def *zero = nir_imm_false(b);
-   nir_def_rewrite_uses(&intrin->dest.ssa, zero);
+   nir_def_rewrite_uses(&intrin->def, zero);
    nir_instr_remove_v(instr);
    return true;
 }
@@ -117,7 +117,7 @@ nir_lower_load_helper_to_is_helper(nir_builder *b, nir_instr *instr, void *data)
        * top-level blocks to ensure correct behavior w.r.t. loops */
       if (is_helper == NULL)
          is_helper = insert_is_helper(b, instr);
-      nir_def_rewrite_uses(&intrin->dest.ssa, is_helper);
+      nir_def_rewrite_uses(&intrin->def, is_helper);
       nir_instr_remove_v(instr);
       return true;
    default:
diff --git a/src/compiler/nir/nir_lower_double_ops.c b/src/compiler/nir/nir_lower_double_ops.c
index c4d7073..07be96d 100644
--- a/src/compiler/nir/nir_lower_double_ops.c
+++ b/src/compiler/nir/nir_lower_double_ops.c
@@ -619,7 +619,7 @@ lower_doubles_instr_to_soft(nir_builder *b, nir_alu_instr *instr,
    nir_variable *ret_tmp =
       nir_local_variable_create(b->impl, return_type, "return_tmp");
    nir_deref_instr *ret_deref = nir_build_deref_var(b, ret_tmp);
-   params[0] = &ret_deref->dest.ssa;
+   params[0] = &ret_deref->def;
    assert(nir_op_infos[instr->op].num_inputs + 1 == func->num_params);
    for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
diff --git a/src/compiler/nir/nir_lower_drawpixels.c b/src/compiler/nir/nir_lower_drawpixels.c
index f566f5c..173fe11 100644
--- a/src/compiler/nir/nir_lower_drawpixels.c
+++ b/src/compiler/nir/nir_lower_drawpixels.c
@@ -111,16 +111,16 @@ lower_color(nir_builder *b, lower_drawpixels_state *state, nir_intrinsic_instr *
    tex->coord_components = 2;
    tex->dest_type = nir_type_float32;
    tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_deref,
-                                     &tex_deref->dest.ssa);
+                                     &tex_deref->def);
    tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref,
-                                     &tex_deref->dest.ssa);
+                                     &tex_deref->def);
    tex->src[2] = nir_tex_src_for_ssa(nir_tex_src_coord,
                                      nir_trim_vector(b, texcoord, tex->coord_components));
-   nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
+   nir_def_init(&tex->instr, &tex->def, 4, 32);
    nir_builder_instr_insert(b, &tex->instr);
-   def = &tex->dest.ssa;
+   def = &tex->def;
    /* Apply the scale and bias. */
    if (state->options->scale_and_bias) {
@@ -152,15 +152,15 @@ lower_color(nir_builder *b, lower_drawpixels_state *state, nir_intrinsic_instr *
       tex->texture_index = state->options->pixelmap_sampler;
       tex->dest_type = nir_type_float32;
       tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_deref,
-                                        &pixelmap_deref->dest.ssa);
+                                        &pixelmap_deref->def);
       tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref,
-                                        &pixelmap_deref->dest.ssa);
+                                        &pixelmap_deref->def);
       tex->src[2] = nir_tex_src_for_ssa(nir_tex_src_coord,
                                         nir_trim_vector(b, def, 2));
-      nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
+      nir_def_init(&tex->instr, &tex->def, 4, 32);
       nir_builder_instr_insert(b, &tex->instr);
-      def_xy = &tex->dest.ssa;
+      def_xy = &tex->def;
       /* TEX def.zw, def.zwww, pixelmap_sampler, 2D; */
       tex = nir_tex_instr_create(state->shader, 1);
@@ -172,9 +172,9 @@ lower_color(nir_builder *b, lower_drawpixels_state *state, nir_intrinsic_instr *
       tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_coord,
                                         nir_channels(b, def, 0xc));
-      nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
+      nir_def_init(&tex->instr, &tex->def, 4, 32);
       nir_builder_instr_insert(b, &tex->instr);
-      def_zw = &tex->dest.ssa;
+      def_zw = &tex->def;
       /* def = vec4(def.xy, def.zw); */
       def = nir_vec4(b,
@@ -184,7 +184,7 @@ lower_color(nir_builder *b, lower_drawpixels_state *state, nir_intrinsic_instr *
                      nir_channel(b, def_zw, 1));
    }
-   nir_def_rewrite_uses(&intr->dest.ssa, def);
+   nir_def_rewrite_uses(&intr->def, def);
    return true;
 }
@@ -194,7 +194,7 @@ lower_texcoord(nir_builder *b, lower_drawpixels_state *state, nir_intrinsic_inst
    b->cursor = nir_before_instr(&intr->instr);
    nir_def *texcoord_const = get_texcoord_const(b, state);
-   nir_def_rewrite_uses(&intr->dest.ssa, texcoord_const);
+   nir_def_rewrite_uses(&intr->def, texcoord_const);
    return true;
 }
diff --git a/src/compiler/nir/nir_lower_fb_read.c b/src/compiler/nir/nir_lower_fb_read.c
index abd2f28..3f0614f 100644
--- a/src/compiler/nir/nir_lower_fb_read.c
+++ b/src/compiler/nir/nir_lower_fb_read.c
@@ -76,10 +76,10 @@ nir_lower_fb_read_instr(nir_builder *b, nir_instr *instr, UNUSED void *cb_data)
    tex->src[2] = nir_tex_src_for_ssa(nir_tex_src_texture_handle,
                                      nir_imm_intN_t(b, io.location - FRAG_RESULT_DATA0, 32));
-   nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
+   nir_def_init(&tex->instr, &tex->def, 4, 32);
    nir_builder_instr_insert(b, &tex->instr);
-   nir_def_rewrite_uses(&intr->dest.ssa, &tex->dest.ssa);
+   nir_def_rewrite_uses(&intr->def, &tex->def);
    return true;
 }
diff --git a/src/compiler/nir/nir_lower_fp16_conv.c b/src/compiler/nir/nir_lower_fp16_conv.c
index 057a355..571cdf4 100644
--- a/src/compiler/nir/nir_lower_fp16_conv.c
+++ b/src/compiler/nir/nir_lower_fp16_conv.c
@@ -202,7 +202,7 @@ lower_fp16_cast_impl(nir_builder *b, nir_instr *instr, void *data)
          nir_intrinsic_dest_type(intrin) != nir_type_float16)
         return false;
      src = intrin->src[0].ssa;
-     dst = &intrin->dest.ssa;
+     dst = &intrin->def;
      mode = nir_intrinsic_rounding_mode(intrin);
    } else {
      return false;
diff --git a/src/compiler/nir/nir_lower_frag_coord_to_pixel_coord.c b/src/compiler/nir/nir_lower_frag_coord_to_pixel_coord.c
index 3425075..79356ec 100644
--- a/src/compiler/nir/nir_lower_frag_coord_to_pixel_coord.c
+++ b/src/compiler/nir/nir_lower_frag_coord_to_pixel_coord.c
@@ -27,7 +27,7 @@ lower(nir_builder *b, nir_instr *instr, UNUSED void *data)
    nir_def *vec = nir_vec4(b, nir_channel(b, xy, 0), nir_channel(b, xy, 1),
                            nir_load_frag_coord_zw(b, .component = 2),
                            nir_load_frag_coord_zw(b, .component = 3));
-   nir_def_rewrite_uses(&intr->dest.ssa, vec);
+   nir_def_rewrite_uses(&intr->def, vec);
    return true;
 }
diff --git a/src/compiler/nir/nir_lower_fragcoord_wtrans.c b/src/compiler/nir/nir_lower_fragcoord_wtrans.c
index d92fe96..7debbff 100644
--- a/src/compiler/nir/nir_lower_fragcoord_wtrans.c
+++ b/src/compiler/nir/nir_lower_fragcoord_wtrans.c
@@ -60,10 +60,10 @@ lower_fragcoord_wtrans_impl(nir_builder *b, nir_instr *instr,
    nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
    return nir_vec4(b,
-                   nir_channel(b, &intr->dest.ssa, 0),
-                   nir_channel(b, &intr->dest.ssa, 1),
-                   nir_channel(b, &intr->dest.ssa, 2),
-                   nir_frcp(b, nir_channel(b, &intr->dest.ssa, 3)));
+                   nir_channel(b, &intr->def, 0),
+                   nir_channel(b, &intr->def, 1),
+                   nir_channel(b, &intr->def, 2),
+                   nir_frcp(b, nir_channel(b, &intr->def, 3)));
 }
 bool
diff --git a/src/compiler/nir/nir_lower_helper_writes.c b/src/compiler/nir/nir_lower_helper_writes.c
index 8d7eba2..15208f2 100644
--- a/src/compiler/nir/nir_lower_helper_writes.c
+++ b/src/compiler/nir/nir_lower_helper_writes.c
@@ -76,26 +76,26 @@ lower(nir_builder *b, nir_instr *instr, void *data)
     */
    if (has_dest) {
       nir_push_else(b, NULL);
-      undef = nir_undef(b, intr->dest.ssa.num_components,
-                        intr->dest.ssa.bit_size);
+      undef = nir_undef(b, intr->def.num_components,
+                        intr->def.bit_size);
    }
    nir_pop_if(b, NULL);
    if (has_dest) {
-      nir_def *phi = nir_if_phi(b, &intr->dest.ssa, undef);
+      nir_def *phi = nir_if_phi(b, &intr->def, undef);
       /* We can't use nir_def_rewrite_uses_after on phis, so use the global
        * version and fixup the phi manually
        */
-      nir_def_rewrite_uses(&intr->dest.ssa, phi);
+      nir_def_rewrite_uses(&intr->def, phi);
       nir_instr *phi_instr = phi->parent_instr;
       nir_phi_instr *phi_as_phi = nir_instr_as_phi(phi_instr);
       nir_phi_src *phi_src = nir_phi_get_src_from_block(phi_as_phi,
                                                         instr->block);
       nir_instr_rewrite_src_ssa(phi->parent_instr, &phi_src->src,
-                                &intr->dest.ssa);
+                                &intr->def);
    }
    return true;
diff --git a/src/compiler/nir/nir_lower_image.c b/src/compiler/nir/nir_lower_image.c
index e6e6489..9b11bd5 100644
--- a/src/compiler/nir/nir_lower_image.c
+++ b/src/compiler/nir/nir_lower_image.c
@@ -46,7 +46,7 @@ lower_cube_size(nir_builder *b, nir_intrinsic_instr *intrin)
    nir_def *size = nir_instr_ssa_def(&_2darray_size->instr);
    nir_scalar comps[NIR_MAX_VEC_COMPONENTS] = { 0 };
-   unsigned coord_comps = intrin->dest.ssa.num_components;
+   unsigned coord_comps = intrin->def.num_components;
    for (unsigned c = 0; c < coord_comps; c++) {
       if (c == 2) {
          comps[2] = nir_get_ssa_scalar(nir_idiv(b, nir_channel(b, size, 2), nir_imm_int(b, 6)), 0);
@@ -55,8 +55,8 @@ lower_cube_size(nir_builder *b, nir_intrinsic_instr *intrin)
       }
    }
-   nir_def *vec = nir_vec_scalars(b, comps, intrin->dest.ssa.num_components);
-   nir_def_rewrite_uses(&intrin->dest.ssa, vec);
+   nir_def *vec = nir_vec_scalars(b, comps, intrin->def.num_components);
+   nir_def_rewrite_uses(&intrin->def, vec);
    nir_instr_remove(&intrin->instr);
    nir_instr_free(&intrin->instr);
 }
@@ -149,11 +149,11 @@ lower_image_samples_identical_to_fragment_mask_load(nir_builder *b, nir_intrinsi
       break;
    }
-   nir_def_init(&fmask_load->instr, &fmask_load->dest.ssa, 1, 32);
+   nir_def_init(&fmask_load->instr, &fmask_load->def, 1, 32);
    nir_builder_instr_insert(b, &fmask_load->instr);
-   nir_def *samples_identical = nir_ieq_imm(b, &fmask_load->dest.ssa, 0);
-   nir_def_rewrite_uses(&intrin->dest.ssa, samples_identical);
+   nir_def *samples_identical = nir_ieq_imm(b, &fmask_load->def, 0);
+   nir_def_rewrite_uses(&intrin->def, samples_identical);
    nir_instr_remove(&intrin->instr);
    nir_instr_free(&intrin->instr);
@@ -206,8 +206,8 @@ lower_image_instr(nir_builder *b, nir_instr *instr, void *state)
    case nir_intrinsic_bindless_image_samples: {
       if (options->lower_image_samples_to_one) {
          b->cursor = nir_after_instr(&intrin->instr);
-         nir_def *samples = nir_imm_intN_t(b, 1, intrin->dest.ssa.bit_size);
-         nir_def_rewrite_uses(&intrin->dest.ssa, samples);
+         nir_def *samples = nir_imm_intN_t(b, 1, intrin->def.bit_size);
+         nir_def_rewrite_uses(&intrin->def, samples);
          return true;
       }
       return false;
diff --git a/src/compiler/nir/nir_lower_image_atomics_to_global.c b/src/compiler/nir/nir_lower_image_atomics_to_global.c
index 45f1570..97c73b4 100644
--- a/src/compiler/nir/nir_lower_image_atomics_to_global.c
+++ b/src/compiler/nir/nir_lower_image_atomics_to_global.c
@@ -43,7 +43,7 @@ lower(nir_builder *b, nir_instr *instr, UNUSED void *_)
    b->cursor = nir_before_instr(instr);
    nir_atomic_op atomic_op = nir_intrinsic_atomic_op(intr);
    enum pipe_format format = nir_intrinsic_format(intr);
-   unsigned bit_size = intr->dest.ssa.bit_size;
+   unsigned bit_size = intr->def.bit_size;
    /* Even for "formatless" access, we know the size of the texel accessed,
     * since it's the size of the atomic. We can use that to synthesize a
@@ -93,7 +93,7 @@ lower(nir_builder *b, nir_instr *instr, UNUSED void *_)
    /* Replace the image atomic with the global atomic. Remove the image
    * explicitly because it has side effects so is not DCE'd.
    */
-   nir_def_rewrite_uses(&intr->dest.ssa, global);
+   nir_def_rewrite_uses(&intr->def, global);
    nir_instr_remove(instr);
    return true;
 }
diff --git a/src/compiler/nir/nir_lower_indirect_derefs.c b/src/compiler/nir/nir_lower_indirect_derefs.c
index 9e06ef4..9c6f758 100644
--- a/src/compiler/nir/nir_lower_indirect_derefs.c
+++ b/src/compiler/nir/nir_lower_indirect_derefs.c
@@ -93,18 +93,18 @@ emit_load_store_deref(nir_builder *b, nir_intrinsic_instr *orig_instr,
         nir_intrinsic_instr_create(b->shader, orig_instr->intrinsic);
      load->num_components = orig_instr->num_components;
-     load->src[0] = nir_src_for_ssa(&parent->dest.ssa);
+     load->src[0] = nir_src_for_ssa(&parent->def);
      /* Copy over any other sources. This is needed for interp_deref_at */
      for (unsigned i = 1;
           i < nir_intrinsic_infos[orig_instr->intrinsic].num_srcs; i++)
        nir_src_copy(&load->src[i], &orig_instr->src[i], &load->instr);
-     nir_def_init(&load->instr, &load->dest.ssa,
-                  orig_instr->dest.ssa.num_components,
-                  orig_instr->dest.ssa.bit_size);
+     nir_def_init(&load->instr, &load->def,
+                  orig_instr->def.num_components,
+                  orig_instr->def.bit_size);
      nir_builder_instr_insert(b, &load->instr);
-     *dest = &load->dest.ssa;
+     *dest = &load->def;
    } else {
      assert(orig_instr->intrinsic == nir_intrinsic_store_deref);
      nir_store_deref(b, parent, src, nir_intrinsic_write_mask(orig_instr));
@@ -175,7 +175,7 @@ lower_indirect_derefs_block(nir_block *block, nir_builder *b,
         nir_def *result;
         emit_load_store_deref(b, intrin, base, &path.path[1],
                               &result, NULL);
-        nir_def_rewrite_uses(&intrin->dest.ssa, result);
+        nir_def_rewrite_uses(&intrin->def, result);
      }
      nir_deref_path_finish(&path);
diff --git a/src/compiler/nir/nir_lower_input_attachments.c b/src/compiler/nir/nir_lower_input_attachments.c
index d864087..10094ce 100644
--- a/src/compiler/nir/nir_lower_input_attachments.c
+++ b/src/compiler/nir/nir_lower_input_attachments.c
@@ -121,7 +121,7 @@ try_lower_input_load(nir_builder *b, nir_intrinsic_instr *load,
    tex->sampler_index = 0;
    tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_deref,
-                                     &deref->dest.ssa);
+                                     &deref->def);
    tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_coord, coord);
    tex->coord_components = 3;
@@ -135,19 +135,19 @@ try_lower_input_load(nir_builder *b, nir_intrinsic_instr *load,
    tex->texture_non_uniform = nir_intrinsic_access(load) & ACCESS_NON_UNIFORM;
-   nir_def_init(&tex->instr, &tex->dest.ssa, nir_tex_instr_dest_size(tex), 32);
+   nir_def_init(&tex->instr, &tex->def, nir_tex_instr_dest_size(tex), 32);
    nir_builder_instr_insert(b, &tex->instr);
    if (tex->is_sparse) {
-      unsigned load_result_size = load->dest.ssa.num_components - 1;
+      unsigned load_result_size = load->def.num_components - 1;
       nir_component_mask_t load_result_mask = nir_component_mask(load_result_size);
       nir_def *res = nir_channels(
-         b, &tex->dest.ssa, load_result_mask | 0x10);
+         b, &tex->def, load_result_mask | 0x10);
-      nir_def_rewrite_uses(&load->dest.ssa, res);
+      nir_def_rewrite_uses(&load->def, res);
    } else {
-      nir_def_rewrite_uses(&load->dest.ssa,
-                           &tex->dest.ssa);
+      nir_def_rewrite_uses(&load->def,
+                           &tex->def);
    }
    return true;
diff --git a/src/compiler/nir/nir_lower_int64.c b/src/compiler/nir/nir_lower_int64.c
index b015700..29c4060 100644
--- a/src/compiler/nir/nir_lower_int64.c
+++ b/src/compiler/nir/nir_lower_int64.c
@@ -1152,7 +1152,7 @@ split_64bit_subgroup_op(nir_builder *b, const nir_intrinsic_instr *intrin)
       nir_unpack_64_2x32_split_y(b, intrin->src[0].ssa),
    };
-   assert(info->has_dest && intrin->dest.ssa.bit_size == 64);
+   assert(info->has_dest && intrin->def.bit_size == 64);
    nir_def *res[2];
    for (unsigned i = 0; i < 2; i++) {
@@ -1171,11 +1171,11 @@ split_64bit_subgroup_op(nir_builder *b, const nir_intrinsic_instr *intrin)
       memcpy(split->const_index, intrin->const_index,
              sizeof(intrin->const_index));
-      nir_def_init(&split->instr, &split->dest.ssa,
-                   intrin->dest.ssa.num_components, 32);
+      nir_def_init(&split->instr, &split->def,
+                   intrin->def.num_components, 32);
       nir_builder_instr_insert(b, &split->instr);
-      res[i] = &split->dest.ssa;
+      res[i] = &split->def;
    }
    return nir_pack_64_2x32_split(b, res[0], res[1]);
@@ -1188,9 +1188,9 @@ build_vote_ieq(nir_builder *b, nir_def *x)
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_vote_ieq);
    vote->src[0] = nir_src_for_ssa(x);
    vote->num_components = x->num_components;
-   nir_def_init(&vote->instr, &vote->dest.ssa, 1, 1);
+   nir_def_init(&vote->instr, &vote->def, 1, 1);
    nir_builder_instr_insert(b, &vote->instr);
-   return &vote->dest.ssa;
+   return &vote->def;
 }
 static nir_def *
@@ -1212,10 +1212,10 @@ build_scan_intrinsic(nir_builder *b, nir_intrinsic_op scan_op,
    nir_intrinsic_set_reduction_op(scan, reduction_op);
    if (scan_op == nir_intrinsic_reduce)
       nir_intrinsic_set_cluster_size(scan, cluster_size);
-   nir_def_init(&scan->instr, &scan->dest.ssa, val->num_components,
+   nir_def_init(&scan->instr, &scan->def, val->num_components,
                 val->bit_size);
    nir_builder_instr_insert(b, &scan->instr);
-   return &scan->dest.ssa;
+   return &scan->def;
 }
 static nir_def *
@@ -1271,7 +1271,7 @@ should_lower_int64_intrinsic(const nir_intrinsic_instr *intrin,
    case nir_intrinsic_quad_swap_horizontal:
    case nir_intrinsic_quad_swap_vertical:
    case nir_intrinsic_quad_swap_diagonal:
-      return intrin->dest.ssa.bit_size == 64 &&
+      return intrin->def.bit_size == 64 &&
             (options->lower_int64_options & nir_lower_subgroup_shuffle64);
    case nir_intrinsic_vote_ieq:
@@ -1281,7 +1281,7 @@ should_lower_int64_intrinsic(const nir_intrinsic_instr *intrin,
    case nir_intrinsic_reduce:
    case nir_intrinsic_inclusive_scan:
    case nir_intrinsic_exclusive_scan:
-      if (intrin->dest.ssa.bit_size != 64)
+      if (intrin->def.bit_size != 64)
         return false;
      switch (nir_intrinsic_reduction_op(intrin)) {
diff --git a/src/compiler/nir/nir_lower_interpolation.c b/src/compiler/nir/nir_lower_interpolation.c
index a62a30e..5f0af1c 100644
--- a/src/compiler/nir/nir_lower_interpolation.c
+++ b/src/compiler/nir/nir_lower_interpolation.c
@@ -116,7 +116,7 @@ nir_lower_interpolation_instr(nir_builder *b, nir_instr *instr, void *cb_data)
       comps[i] = val;
    }
    nir_def *vec = nir_vec(b, comps, intr->num_components);
-   nir_def_rewrite_uses(&intr->dest.ssa, vec);
+   nir_def_rewrite_uses(&intr->def, vec);
    return true;
 }
diff --git a/src/compiler/nir/nir_lower_io.c b/src/compiler/nir/nir_lower_io.c
index 885bd20..b0a042d 100644
--- a/src/compiler/nir/nir_lower_io.c
+++ b/src/compiler/nir/nir_lower_io.c
@@ -337,10 +337,10 @@ emit_load(struct lower_io_state *state,
       load->src[0] = nir_src_for_ssa(offset);
    }
-   nir_def_init(&load->instr, &load->dest.ssa, num_components, bit_size);
+   nir_def_init(&load->instr, &load->def, num_components, bit_size);
    nir_builder_instr_insert(b, &load->instr);
-   return &load->dest.ssa;
+   return &load->def;
 }
 static nir_def *
@@ -349,7 +349,7 @@ lower_load(nir_intrinsic_instr *intrin, struct lower_io_state *state,
            unsigned component, const struct glsl_type *type)
 {
    const bool lower_double = !glsl_type_is_integer(type) &&
      state->options & nir_lower_io_lower_64bit_float_to_32;
-   if (intrin->dest.ssa.bit_size == 64 &&
+   if (intrin->def.bit_size == 64 &&
        (lower_double || (state->options & nir_lower_io_lower_64bit_to_32))) {
       nir_builder *b = &state->builder;
@@ -358,9 +358,9 @@ lower_load(nir_intrinsic_instr *intrin, struct lower_io_state *state,
       nir_def *comp64[4];
       assert(component == 0 || component == 2);
       unsigned dest_comp = 0;
-      while (dest_comp < intrin->dest.ssa.num_components) {
+      while (dest_comp < intrin->def.num_components) {
         const unsigned num_comps =
-            MIN2(intrin->dest.ssa.num_components - dest_comp,
+            MIN2(intrin->def.num_components - dest_comp,
                  (4 - component) / 2);
         nir_def *data32 =
@@ -377,18 +377,18 @@ lower_load(nir_intrinsic_instr *intrin, struct lower_io_state *state,
            offset = nir_iadd_imm(b, offset, slot_size);
      }
-      return nir_vec(b, comp64, intrin->dest.ssa.num_components);
-   } else if (intrin->dest.ssa.bit_size == 1) {
+      return nir_vec(b, comp64, intrin->def.num_components);
+   } else if (intrin->def.bit_size == 1) {
      /* Booleans are 32-bit */
      assert(glsl_type_is_boolean(type));
      return nir_b2b1(&state->builder,
                      emit_load(state, array_index, var, offset, component,
-                               intrin->dest.ssa.num_components, 32,
+                               intrin->def.num_components, 32,
                                nir_type_bool32));
    } else {
      return emit_load(state, array_index, var, offset, component,
-                      intrin->dest.ssa.num_components,
-                      intrin->dest.ssa.bit_size,
+                      intrin->def.num_components,
+                      intrin->def.bit_size,
                       nir_get_nir_type_for_glsl_type(type));
    }
 }
@@ -497,7 +497,7 @@ lower_store(nir_intrinsic_instr *intrin, struct lower_io_state *state,
         write_mask >>= num_comps;
         offset = nir_iadd_imm(b, offset, slot_size);
      }
-   } else if (intrin->dest.ssa.bit_size == 1) {
+   } else if (intrin->def.bit_size == 1) {
      /* Booleans are 32-bit */
      assert(glsl_type_is_boolean(type));
      nir_def *b32_val = nir_b2b32(&state->builder, intrin->src[1].ssa);
@@ -537,7 +537,7 @@ lower_interpolate_at(nir_intrinsic_instr *intrin, struct lower_io_state *state,
    }
    /* None of the supported APIs allow interpolation on 64-bit things */
-   assert(intrin->dest.ssa.bit_size <= 32);
+   assert(intrin->def.bit_size <= 32);
    nir_intrinsic_op bary_op;
    switch (intrin->intrinsic) {
@@ -557,7 +557,7 @@ lower_interpolate_at(nir_intrinsic_instr *intrin, struct lower_io_state *state,
    nir_intrinsic_instr *bary_setup =
       nir_intrinsic_instr_create(state->builder.shader, bary_op);
-   nir_def_init(&bary_setup->instr, &bary_setup->dest.ssa, 2, 32);
+   nir_def_init(&bary_setup->instr, &bary_setup->def, 2, 32);
    nir_intrinsic_set_interp_mode(bary_setup, var->data.interpolation);
    if (intrin->intrinsic == nir_intrinsic_interp_deref_at_sample ||
@@ -576,14 +576,14 @@ lower_interpolate_at(nir_intrinsic_instr *intrin, struct lower_io_state *state,
    nir_def *load =
      nir_load_interpolated_input(&state->builder,
-                                 intrin->dest.ssa.num_components,
-                                 intrin->dest.ssa.bit_size,
-                                 &bary_setup->dest.ssa,
+                                 intrin->def.num_components,
+                                 intrin->def.bit_size,
+                                 &bary_setup->def,
                                  offset,
                                  .base = var->data.driver_location,
                                  .component = component,
                                  .io_semantics = semantics,
-                                 .dest_type = nir_type_float | intrin->dest.ssa.bit_size);
+                                 .dest_type = nir_type_float | intrin->def.bit_size);
    return load;
 }
@@ -658,9 +658,9 @@ nir_lower_io_block(nir_block *block,
           */
         if (intrin->intrinsic != nir_intrinsic_store_deref) {
            nir_def *zero =
-               nir_imm_zero(b, intrin->dest.ssa.num_components,
-                            intrin->dest.ssa.bit_size);
-            nir_def_rewrite_uses(&intrin->dest.ssa,
+               nir_imm_zero(b, intrin->def.num_components,
+                            intrin->def.bit_size);
+            nir_def_rewrite_uses(&intrin->def,
                                  zero);
         }
@@ -700,7 +700,7 @@ nir_lower_io_block(nir_block *block,
      }
      if (replacement) {
-         nir_def_rewrite_uses(&intrin->dest.ssa,
+         nir_def_rewrite_uses(&intrin->def,
                               replacement);
      }
      nir_instr_remove(&intrin->instr);
@@ -1488,7 +1488,7 @@ build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin,
      nir_intrinsic_set_range(load, glsl_get_explicit_size(var->type, false));
    }
-   unsigned bit_size = intrin->dest.ssa.bit_size;
+   unsigned bit_size = intrin->def.bit_size;
    if (bit_size == 1) {
      /* TODO: Make the native bool bit_size an option. */
      bit_size = 32;
@@ -1505,7 +1505,7 @@ build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin,
    }
    load->num_components = num_components;
-   nir_def_init(&load->instr, &load->dest.ssa, num_components, bit_size);
+   nir_def_init(&load->instr, &load->def, num_components, bit_size);
    assert(bit_size % 8 == 0);
@@ -1529,13 +1529,13 @@ build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin,
      nir_pop_if(b, NULL);
-      result = nir_if_phi(b, &load->dest.ssa, zero);
+      result = nir_if_phi(b, &load->def, zero);
    } else {
      nir_builder_instr_insert(b, &load->instr);
-      result = &load->dest.ssa;
+      result = &load->def;
    }
-   if (intrin->dest.ssa.bit_size == 1) {
+   if (intrin->def.bit_size == 1) {
      /* For shared, we can go ahead and use NIR's and/or the back-end's
       * standard encoding for booleans rather than forcing a 0/1 boolean.
       * This should save an instruction or two.
@@ -1811,24 +1811,24 @@ build_explicit_io_atomic(nir_builder *b, nir_intrinsic_instr *intrin,
    if (nir_intrinsic_has_access(atomic))
      nir_intrinsic_set_access(atomic, nir_intrinsic_access(intrin));
-   assert(intrin->dest.ssa.num_components == 1);
-   nir_def_init(&atomic->instr, &atomic->dest.ssa, 1,
-                intrin->dest.ssa.bit_size);
+   assert(intrin->def.num_components == 1);
+   nir_def_init(&atomic->instr, &atomic->def, 1,
+                intrin->def.bit_size);
-   assert(atomic->dest.ssa.bit_size % 8 == 0);
+   assert(atomic->def.bit_size % 8 == 0);
    if (addr_format_needs_bounds_check(addr_format)) {
-      const unsigned atomic_size = atomic->dest.ssa.bit_size / 8;
+      const unsigned atomic_size = atomic->def.bit_size / 8;
      nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, atomic_size));
      nir_builder_instr_insert(b, &atomic->instr);
      nir_pop_if(b, NULL);
-      return nir_if_phi(b, &atomic->dest.ssa,
-                        nir_undef(b, 1, atomic->dest.ssa.bit_size));
+      return nir_if_phi(b, &atomic->def,
+                        nir_undef(b, 1, atomic->def.bit_size));
    } else {
      nir_builder_instr_insert(b, &atomic->instr);
-      return &atomic->dest.ssa;
+      return &atomic->def;
    }
 }
@@ -1935,7 +1935,7 @@ nir_lower_explicit_io_instr(nir_builder *b,
                             deref->modes, align_mul, align_offset,
                             intrin->num_components);
      }
-      nir_def_rewrite_uses(&intrin->dest.ssa, value);
+      nir_def_rewrite_uses(&intrin->def, value);
      break;
    }
@@ -1969,7 +1969,7 @@ nir_lower_explicit_io_instr(nir_builder *b,
                          deref->modes, align_mul, align_offset,
                          intrin->num_components);
-      nir_def_rewrite_uses(&intrin->dest.ssa, value);
+      nir_def_rewrite_uses(&intrin->def, value);
      break;
    }
@@ -1985,7 +1985,7 @@ nir_lower_explicit_io_instr(nir_builder *b,
    default: {
      nir_def *value =
         build_explicit_io_atomic(b, intrin, addr, addr_format, deref->modes);
-      nir_def_rewrite_uses(&intrin->dest.ssa, value);
+      nir_def_rewrite_uses(&intrin->def, value);
      break;
    }
    }
@@ -2108,7 +2108,7 @@ lower_explicit_io_deref(nir_builder *b, nir_deref_instr *deref,
    * one deref which could break our list walking since we walk the list
    * backwards.
    */
-   if (nir_def_is_unused(&deref->dest.ssa)) {
+   if (nir_def_is_unused(&deref->def)) {
      nir_instr_remove(&deref->instr);
      return;
    }
@@ -2122,11 +2122,11 @@ lower_explicit_io_deref(nir_builder *b, nir_deref_instr *deref,
    nir_def *addr = nir_explicit_io_address_from_deref(b, deref, base_addr,
                                                       addr_format);
-   assert(addr->bit_size == deref->dest.ssa.bit_size);
-   assert(addr->num_components == deref->dest.ssa.num_components);
+   assert(addr->bit_size == deref->def.bit_size);
+   assert(addr->num_components == deref->def.num_components);
    nir_instr_remove(&deref->instr);
-   nir_def_rewrite_uses(&deref->dest.ssa, addr);
+   nir_def_rewrite_uses(&deref->def, addr);
 }
 static void
@@ -2150,7 +2150,7 @@ lower_explicit_io_array_length(nir_builder *b, nir_intrinsic_instr *intrin,
    unsigned stride = glsl_get_explicit_stride(deref->type);
    assert(stride > 0);
-   nir_def *addr = &deref->dest.ssa;
+   nir_def *addr = &deref->def;
    nir_def *offset, *size;
    switch (addr_format) {
@@ -2177,7 +2177,7 @@ lower_explicit_io_array_length(nir_builder *b, nir_intrinsic_instr *intrin,
    nir_def *remaining = nir_usub_sat(b, size, offset);
    nir_def *arr_size = nir_udiv_imm(b, remaining, stride);
-   nir_def_rewrite_uses(&intrin->dest.ssa, arr_size);
+   nir_def_rewrite_uses(&intrin->def, arr_size);
    nir_instr_remove(&intrin->instr);
 }
@@ -2203,7 +2203,7 @@ lower_explicit_io_mode_check(nir_builder *b, nir_intrinsic_instr *intrin,
      build_runtime_addr_mode_check(b, addr, addr_format,
                                    nir_intrinsic_memory_modes(intrin));
-   nir_def_rewrite_uses(&intrin->dest.ssa, is_mode);
+   nir_def_rewrite_uses(&intrin->def, is_mode);
 }
 static bool
@@ -2844,8 +2844,8 @@ is_dual_slot(nir_intrinsic_instr *intrin)
             nir_src_num_components(intrin->src[0]) >= 3;
    }
-   return intrin->dest.ssa.bit_size == 64 &&
-          intrin->dest.ssa.num_components >= 3;
+   return intrin->def.bit_size == 64 &&
+          intrin->def.num_components >= 3;
 }
 /**
@@ -2991,7 +2991,7 @@ nir_lower_color_inputs(nir_shader *nir)
            load = nir_channels(&b, load, BITFIELD_RANGE(start, count));
         }
-         nir_def_rewrite_uses(&intrin->dest.ssa, load);
+         nir_def_rewrite_uses(&intrin->def, load);
         nir_instr_remove(instr);
         progress = true;
      }
diff --git a/src/compiler/nir/nir_lower_io_arrays_to_elements.c b/src/compiler/nir/nir_lower_io_arrays_to_elements.c
index 7c6e664..d60f839 100644
--- a/src/compiler/nir/nir_lower_io_arrays_to_elements.c
+++ b/src/compiler/nir/nir_lower_io_arrays_to_elements.c
@@ -114,9 +114,9 @@ lower_array(nir_builder *b, nir_intrinsic_instr *intr, nir_variable *var,
    if (nir_deref_instr_is_known_out_of_bounds(nir_src_as_deref(intr->src[0]))) {
      /* See Section 5.11 (Out-of-Bounds Accesses) of the GLSL 4.60 */
      if (intr->intrinsic != nir_intrinsic_store_deref) {
-        nir_def *zero = nir_imm_zero(b, intr->dest.ssa.num_components,
-                                     intr->dest.ssa.bit_size);
-        nir_def_rewrite_uses(&intr->dest.ssa,
+        nir_def *zero = nir_imm_zero(b, intr->def.num_components,
+                                     intr->def.bit_size);
+        nir_def_rewrite_uses(&intr->def,
                              zero);
      }
      nir_instr_remove(&intr->instr);
@@ -168,11 +168,11 @@ lower_array(nir_builder *b, nir_intrinsic_instr *intr, nir_variable *var,
    nir_intrinsic_instr *element_intr =
      nir_intrinsic_instr_create(b->shader, intr->intrinsic);
    element_intr->num_components = intr->num_components;
-   element_intr->src[0] = nir_src_for_ssa(&element_deref->dest.ssa);
+   element_intr->src[0] = nir_src_for_ssa(&element_deref->def);
    if (intr->intrinsic != nir_intrinsic_store_deref) {
-      nir_def_init(&element_intr->instr, &element_intr->dest.ssa,
-                   intr->num_components, intr->dest.ssa.bit_size);
+      nir_def_init(&element_intr->instr, &element_intr->def,
+                   intr->num_components, intr->def.bit_size);
      if (intr->intrinsic == nir_intrinsic_interp_deref_at_offset ||
          intr->intrinsic == nir_intrinsic_interp_deref_at_sample ||
@@ -181,8 +181,8 @@ lower_array(nir_builder *b, nir_intrinsic_instr *intr, nir_variable *var,
                       &element_intr->instr);
      }
-      nir_def_rewrite_uses(&intr->dest.ssa,
-                           &element_intr->dest.ssa);
+      nir_def_rewrite_uses(&intr->def,
+                           &element_intr->def);
    } else {
      nir_intrinsic_set_write_mask(element_intr,
                                   nir_intrinsic_write_mask(intr));
diff --git a/src/compiler/nir/nir_lower_io_to_scalar.c b/src/compiler/nir/nir_lower_io_to_scalar.c
index e344e75..a7824dc 100644
--- a/src/compiler/nir/nir_lower_io_to_scalar.c
+++ b/src/compiler/nir/nir_lower_io_to_scalar.c
@@ -53,8 +53,8 @@ lower_load_input_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
      unsigned newc = nir_intrinsic_component(intr);
      nir_intrinsic_instr *chan_intr =
         nir_intrinsic_instr_create(b->shader, intr->intrinsic);
-      nir_def_init(&chan_intr->instr, &chan_intr->dest.ssa, 1,
-                   intr->dest.ssa.bit_size);
+      nir_def_init(&chan_intr->instr, &chan_intr->def, 1,
+                   intr->def.bit_size);
      chan_intr->num_components = 1;
      nir_intrinsic_set_base(chan_intr, nir_intrinsic_base(intr));
@@ -73,10 +73,10 @@ lower_load_input_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
      nir_builder_instr_insert(b, &chan_intr->instr);
-      loads[i] = &chan_intr->dest.ssa;
+      loads[i] = &chan_intr->def;
    }
-   nir_def_rewrite_uses(&intr->dest.ssa,
+   nir_def_rewrite_uses(&intr->def,
                         nir_vec(b, loads, intr->num_components));
    nir_instr_remove(&intr->instr);
 }
@@ -92,13 +92,13 @@ lower_load_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
    for (unsigned i = 0; i < intr->num_components; i++) {
      nir_intrinsic_instr *chan_intr =
         nir_intrinsic_instr_create(b->shader, intr->intrinsic);
-      nir_def_init(&chan_intr->instr, &chan_intr->dest.ssa, 1,
-                   intr->dest.ssa.bit_size);
+      nir_def_init(&chan_intr->instr, &chan_intr->def, 1,
+                   intr->def.bit_size);
      chan_intr->num_components = 1;
      nir_intrinsic_set_align_offset(chan_intr,
                                     (nir_intrinsic_align_offset(intr) +
-                                     i * (intr->dest.ssa.bit_size / 8)) %
+                                     i * (intr->def.bit_size / 8)) %
                                     nir_intrinsic_align_mul(intr));
      nir_intrinsic_set_align_mul(chan_intr, nir_intrinsic_align_mul(intr));
      if (nir_intrinsic_has_access(intr))
@@ -113,15 +113,15 @@ lower_load_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
        nir_src_copy(&chan_intr->src[j], &intr->src[j], &chan_intr->instr);
      /* increment offset per component */
-      nir_def *offset = nir_iadd_imm(b, base_offset, i * (intr->dest.ssa.bit_size / 8));
+      nir_def *offset = nir_iadd_imm(b, base_offset, i * (intr->def.bit_size / 8));
      *nir_get_io_offset_src(chan_intr) = nir_src_for_ssa(offset);
      nir_builder_instr_insert(b, &chan_intr->instr);
-      loads[i] = &chan_intr->dest.ssa;
+      loads[i] = &chan_intr->def;
    }
-   nir_def_rewrite_uses(&intr->dest.ssa,
+   nir_def_rewrite_uses(&intr->def,
                         nir_vec(b, loads, intr->num_components));
    nir_instr_remove(&intr->instr);
 }
@@ -380,15 +380,15 @@ lower_load_to_scalar_early(nir_builder *b, nir_intrinsic_instr *intr,
      nir_intrinsic_instr *chan_intr =
         nir_intrinsic_instr_create(b->shader, intr->intrinsic);
-      nir_def_init(&chan_intr->instr, &chan_intr->dest.ssa, 1,
-                   intr->dest.ssa.bit_size);
+      nir_def_init(&chan_intr->instr, &chan_intr->def, 1,
+                   intr->def.bit_size);
      chan_intr->num_components = 1;
      nir_deref_instr *deref = nir_build_deref_var(b, chan_var);
      deref = clone_deref_array(b, deref, nir_src_as_deref(intr->src[0]));
-      chan_intr->src[0] = nir_src_for_ssa(&deref->dest.ssa);
+      chan_intr->src[0] = nir_src_for_ssa(&deref->def);
      if (intr->intrinsic == nir_intrinsic_interp_deref_at_offset ||
          intr->intrinsic == nir_intrinsic_interp_deref_at_sample ||
@@ -397,10 +397,10 @@ lower_load_to_scalar_early(nir_builder *b, nir_intrinsic_instr *intr,
      nir_builder_instr_insert(b, &chan_intr->instr);
-      loads[i] = &chan_intr->dest.ssa;
+      loads[i] = &chan_intr->def;
    }
-   nir_def_rewrite_uses(&intr->dest.ssa,
+   nir_def_rewrite_uses(&intr->def,
                         nir_vec(b, loads, intr->num_components));
    /* Remove the old load intrinsic */
@@ -442,7 +442,7 @@ lower_store_output_to_scalar_early(nir_builder *b, nir_intrinsic_instr *intr,
      deref = clone_deref_array(b, deref, nir_src_as_deref(intr->src[0]));
-      chan_intr->src[0] = nir_src_for_ssa(&deref->dest.ssa);
+      chan_intr->src[0] = nir_src_for_ssa(&deref->def);
      chan_intr->src[1] = nir_src_for_ssa(nir_channel(b, value, i));
      nir_builder_instr_insert(b, &chan_intr->instr);
diff --git a/src/compiler/nir/nir_lower_io_to_temporaries.c b/src/compiler/nir/nir_lower_io_to_temporaries.c
index c2ac524..7a26be9 100644
--- a/src/compiler/nir/nir_lower_io_to_temporaries.c
+++ b/src/compiler/nir/nir_lower_io_to_temporaries.c
@@ -198,7 +198,7 @@ emit_interp(nir_builder *b, nir_deref_instr **old_interp_deref,
    nir_intrinsic_instr *new_interp =
      nir_intrinsic_instr_create(b->shader, interp->intrinsic);
-   new_interp->src[0] = nir_src_for_ssa(&new_interp_deref->dest.ssa);
+   new_interp->src[0] = nir_src_for_ssa(&new_interp_deref->def);
    if (interp->intrinsic == nir_intrinsic_interp_deref_at_sample ||
        interp->intrinsic == nir_intrinsic_interp_deref_at_offset ||
        interp->intrinsic == nir_intrinsic_interp_deref_at_vertex) {
@@ -206,12 +206,12 @@ emit_interp(nir_builder *b, nir_deref_instr **old_interp_deref,
    }
    new_interp->num_components = interp->num_components;
-   nir_def_init(&new_interp->instr, &new_interp->dest.ssa,
-                interp->dest.ssa.num_components, interp->dest.ssa.bit_size);
+   nir_def_init(&new_interp->instr, &new_interp->def,
+                interp->def.num_components, interp->def.bit_size);
    nir_builder_instr_insert(b, &new_interp->instr);
-   nir_store_deref(b, temp_deref, &new_interp->dest.ssa,
-                   (1 << interp->dest.ssa.num_components) - 1);
+   nir_store_deref(b, temp_deref, &new_interp->def,
+                   (1 << interp->def.num_components) - 1);
 }
 static void
@@ -244,7 +244,7 @@ fixup_interpolation_instr(struct lower_io_state *state,
    * correct part of the temporary.
    */
    nir_def *load = nir_load_deref(b, nir_src_as_deref(interp->src[0]));
-   nir_def_rewrite_uses(&interp->dest.ssa, load);
+   nir_def_rewrite_uses(&interp->def, load);
    nir_instr_remove(&interp->instr);
    nir_deref_path_finish(&interp_path);
diff --git a/src/compiler/nir/nir_lower_io_to_vector.c b/src/compiler/nir/nir_lower_io_to_vector.c
index dc8b572..4d9ba7f 100644
--- a/src/compiler/nir/nir_lower_io_to_vector.c
+++ b/src/compiler/nir/nir_lower_io_to_vector.c
@@ -344,7 +344,7 @@ build_array_index(nir_builder *b, nir_deref_instr *deref, nir_def *base,
      return base;
    case nir_deref_type_array: {
      nir_def *index = nir_i2iN(b, deref->arr.index.ssa,
-                               deref->dest.ssa.bit_size);
+                               deref->def.bit_size);
      if (nir_deref_instr_parent(deref)->deref_type == nir_deref_type_var &&
          per_vertex)
@@ -505,17 +505,17 @@ nir_lower_io_to_vector_impl(nir_function_impl *impl, nir_variable_mode modes)
               assert(glsl_type_is_vector(new_deref->type));
            }
            nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
-                                 nir_src_for_ssa(&new_deref->dest.ssa));
+                                 nir_src_for_ssa(&new_deref->def));
            intrin->num_components =
               glsl_get_components(new_deref->type);
-            intrin->dest.ssa.num_components = intrin->num_components;
+            intrin->def.num_components = intrin->num_components;
            b.cursor = nir_after_instr(&intrin->instr);
-            nir_def *new_vec = nir_channels(&b, &intrin->dest.ssa,
+            nir_def *new_vec = nir_channels(&b, &intrin->def,
                                             vec4_comp_mask >> new_frac);
-            nir_def_rewrite_uses_after(&intrin->dest.ssa,
+            nir_def_rewrite_uses_after(&intrin->def,
                                        new_vec,
                                        new_vec->parent_instr);
@@ -552,7 +552,7 @@ nir_lower_io_to_vector_impl(nir_function_impl *impl, nir_variable_mode modes)
               assert(glsl_type_is_vector(new_deref->type));
            }
            nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
-                                 nir_src_for_ssa(&new_deref->dest.ssa));
+                                 nir_src_for_ssa(&new_deref->def));
            intrin->num_components =
               glsl_get_components(new_deref->type);
@@ -649,7 +649,7 @@ nir_vectorize_tess_levels_impl(nir_function_impl *impl)
         unsigned vec_size = glsl_get_vector_elements(var->type);
         b.cursor = nir_before_instr(instr);
-         nir_def *new_deref = &nir_build_deref_var(&b, var)->dest.ssa;
+         nir_def *new_deref = &nir_build_deref_var(&b, var)->def;
         nir_instr_rewrite_src(instr, &intrin->src[0], nir_src_for_ssa(new_deref));
         nir_deref_instr_remove_if_unused(deref);
@@ -661,7 +661,7 @@ nir_vectorize_tess_levels_impl(nir_function_impl *impl)
            if (intrin->intrinsic == nir_intrinsic_load_deref) {
               /* Return undef from out of bounds loads. */
               b.cursor = nir_after_instr(instr);
-               nir_def *val = &intrin->dest.ssa;
+               nir_def *val = &intrin->def;
               nir_def *u = nir_undef(&b, val->num_components, val->bit_size);
               nir_def_rewrite_uses(val, u);
            }
@@ -679,7 +679,7 @@ nir_vectorize_tess_levels_impl(nir_function_impl *impl)
            nir_instr_rewrite_src(instr, &intrin->src[1], nir_src_for_ssa(new_val));
         } else {
            b.cursor = nir_after_instr(instr);
-            nir_def *val = &intrin->dest.ssa;
+            nir_def *val = &intrin->def;
            val->num_components = intrin->num_components;
            nir_def *comp = nir_channel(&b, val, index);
            nir_def_rewrite_uses_after(val, comp, comp->parent_instr);
diff --git a/src/compiler/nir/nir_lower_is_helper_invocation.c b/src/compiler/nir/nir_lower_is_helper_invocation.c
index 53dd3bd..a85e582 100644
--- a/src/compiler/nir/nir_lower_is_helper_invocation.c
+++ b/src/compiler/nir/nir_lower_is_helper_invocation.c
@@ -64,7 +64,7 @@ nir_lower_load_and_store_is_helper(nir_builder *b, nir_instr *instr, void *data)
    case nir_intrinsic_is_helper_invocation: {
      b->cursor = nir_before_instr(instr);
      nir_def *is_helper = nir_load_deref(b, is_helper_deref);
-      nir_def_rewrite_uses(&intrin->dest.ssa, is_helper);
+      nir_def_rewrite_uses(&intrin->def, is_helper);
      nir_instr_remove_v(instr);
      return true;
    }
diff --git a/src/compiler/nir/nir_lower_locals_to_regs.c b/src/compiler/nir/nir_lower_locals_to_regs.c
index 799cdbf..a9b9085 100644
--- a/src/compiler/nir/nir_lower_locals_to_regs.c
+++ b/src/compiler/nir/nir_lower_locals_to_regs.c
@@ -234,7 +234,7 @@ lower_locals_to_regs_block(nir_block *block,
                              loc.reg, .base = loc.base_offset);
         }
-         nir_def_rewrite_uses(&intrin->dest.ssa, value);
+         nir_def_rewrite_uses(&intrin->def, value);
         nir_instr_remove(&intrin->instr);
         state->progress = true;
         break;
diff --git a/src/compiler/nir/nir_lower_mediump.c b/src/compiler/nir/nir_lower_mediump.c
index 44771a4..a5a65a2 100644
--- a/src/compiler/nir/nir_lower_mediump.c
+++ b/src/compiler/nir/nir_lower_mediump.c
@@ -240,10 +240,10 @@ nir_lower_mediump_io(nir_shader *nir, nir_variable_mode modes,
         /* Convert the 32-bit load into a 16-bit load. */
         b.cursor = nir_after_instr(&intr->instr);
-         intr->dest.ssa.bit_size = 16;
+         intr->def.bit_size = 16;
         nir_intrinsic_set_dest_type(intr, (type & ~32) | 16);
-         nir_def *dst = convert(&b, &intr->dest.ssa);
-         nir_def_rewrite_uses_after(&intr->dest.ssa, dst,
+         nir_def *dst = convert(&b, &intr->def);
+         nir_def_rewrite_uses_after(&intr->def, dst,
                                     dst->parent_instr);
      }
@@ -452,32 +452,32 @@ nir_lower_mediump_vars_impl(nir_function_impl *impl, nir_variable_mode modes,
         switch (intrin->intrinsic) {
         case nir_intrinsic_load_deref: {
-            if (intrin->dest.ssa.bit_size != 32)
+            if (intrin->def.bit_size != 32)
               break;
            nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
            if (glsl_get_bit_size(deref->type) != 16)
               break;
-            intrin->dest.ssa.bit_size = 16;
+            intrin->def.bit_size = 16;
            b.cursor = nir_after_instr(&intrin->instr);
            nir_def *replace = NULL;
            switch (glsl_get_base_type(deref->type)) {
            case GLSL_TYPE_FLOAT16:
-               replace = nir_f2f32(&b, &intrin->dest.ssa);
+               replace = nir_f2f32(&b, &intrin->def);
               break;
            case GLSL_TYPE_INT16:
-               replace = nir_i2i32(&b, &intrin->dest.ssa);
+               replace = nir_i2i32(&b, &intrin->def);
               break;
            case GLSL_TYPE_UINT16:
-               replace = nir_u2u32(&b, &intrin->dest.ssa);
+               replace = nir_u2u32(&b, &intrin->def);
               break;
            default:
               unreachable("Invalid 16-bit type");
            }
-            nir_def_rewrite_uses_after(&intrin->dest.ssa,
+            nir_def_rewrite_uses_after(&intrin->def,
                                        replace,
                                        replace->parent_instr);
            progress = true;
@@ -883,7 +883,7 @@ fold_16bit_image_dest(nir_intrinsic_instr *instr, unsigned exec_mode,
    if (!(nir_alu_type_get_base_type(dest_type) & allowed_types))
      return false;
-   if (!fold_16bit_destination(&instr->dest.ssa, dest_type, exec_mode, rdm))
+   if (!fold_16bit_destination(&instr->def, dest_type, exec_mode, rdm))
      return false;
    nir_intrinsic_set_dest_type(instr, (dest_type & ~32) | 16);
@@ -913,7 +913,7 @@ fold_16bit_tex_dest(nir_tex_instr *tex, unsigned exec_mode,
    if (!(nir_alu_type_get_base_type(tex->dest_type) & allowed_types))
      return false;
-   if (!fold_16bit_destination(&tex->dest.ssa, tex->dest_type, exec_mode, rdm))
+   if (!fold_16bit_destination(&tex->def, tex->dest_type, exec_mode, rdm))
      return false;
    tex->dest_type = (tex->dest_type & ~32) | 16;
diff --git a/src/compiler/nir/nir_lower_mem_access_bit_sizes.c b/src/compiler/nir/nir_lower_mem_access_bit_sizes.c
index 286f0ab..f327b27 100644
--- a/src/compiler/nir/nir_lower_mem_access_bit_sizes.c
+++ b/src/compiler/nir/nir_lower_mem_access_bit_sizes.c
@@ -58,7 +58,7 @@ dup_mem_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
    nir_intrinsic_set_align(dup, align_mul, align_offset);
    if (info->has_dest) {
-      nir_def_init(&dup->instr, &dup->dest.ssa, num_components, bit_size);
+      nir_def_init(&dup->instr, &dup->def, num_components, bit_size);
    } else {
      nir_intrinsic_set_write_mask(dup, (1 << num_components) - 1);
    }
@@ -73,8 +73,8 @@ lower_mem_load(nir_builder *b, nir_intrinsic_instr *intrin,
               nir_lower_mem_access_bit_sizes_cb mem_access_size_align_cb,
               const void *cb_data)
 {
-   const unsigned bit_size = intrin->dest.ssa.bit_size;
-   const unsigned num_components = intrin->dest.ssa.num_components;
+   const unsigned bit_size = intrin->def.bit_size;
+   const unsigned num_components = intrin->def.num_components;
    const unsigned bytes_read = num_components * (bit_size / 8);
    const uint32_t align_mul = nir_intrinsic_align_mul(intrin);
    const uint32_t whole_align_offset = nir_intrinsic_align_offset(intrin);
@@ -136,22 +136,22 @@ lower_mem_load(nir_builder *b, nir_intrinsic_instr *intrin,
         chunk_bytes = MIN2(bytes_left, requested_bytes - max_pad);
         nir_def *shift = nir_imul_imm(b, pad, 8);
-         nir_def *shifted = nir_ushr(b, &load->dest.ssa, shift);
+         nir_def *shifted = nir_ushr(b, &load->def, shift);
-         if (load->dest.ssa.num_components > 1) {
+         if (load->def.num_components > 1) {
            nir_def *rev_shift =
-               nir_isub_imm(b, load->dest.ssa.bit_size, shift);
-            nir_def *rev_shifted = nir_ishl(b, &load->dest.ssa, rev_shift);
+               nir_isub_imm(b, load->def.bit_size, shift);
+            nir_def *rev_shifted = nir_ishl(b, &load->def, rev_shift);
            nir_def *comps[NIR_MAX_VEC_COMPONENTS];
-            for (unsigned i = 1; i < load->dest.ssa.num_components; i++)
+            for (unsigned i = 1; i < load->def.num_components; i++)
               comps[i - 1] = nir_channel(b, rev_shifted, i);
-            comps[load->dest.ssa.num_components - 1] =
-               nir_imm_zero(b, 1, load->dest.ssa.bit_size);
+            comps[load->def.num_components - 1] =
+               nir_imm_zero(b, 1, load->def.bit_size);
-            rev_shifted = nir_vec(b, comps, load->dest.ssa.num_components);
-            shifted = nir_bcsel(b, nir_ieq_imm(b, shift, 0), &load->dest.ssa,
+            rev_shifted = nir_vec(b, comps, load->def.num_components);
+            shifted = nir_bcsel(b, nir_ieq_imm(b, shift, 0), &load->def,
                                 nir_ior(b, shifted, rev_shifted));
         }
@@ -192,7 +192,7 @@ lower_mem_load(nir_builder *b, nir_intrinsic_instr *intrin,
         /* There's no guarantee that chunk_num_components is a valid NIR
          * vector size, so just loop one chunk component at a time */
-         nir_def *chunk_data = &load->dest.ssa;
+         nir_def *chunk_data = &load->def;
         for (unsigned i = 0; i < chunk_num_components; i++) {
            assert(num_chunks < ARRAY_SIZE(chunks));
            chunks[num_chunks++] =
@@ -209,7 +209,7 @@ lower_mem_load(nir_builder *b, nir_intrinsic_instr *intrin,
         chunk_bytes = requested.num_components * (requested.bit_size / 8);
         assert(num_chunks < ARRAY_SIZE(chunks));
-         chunks[num_chunks++] = &load->dest.ssa;
+         chunks[num_chunks++] = &load->def;
      }
      chunk_start += chunk_bytes;
@@ -217,7 +217,7 @@ lower_mem_load(nir_builder *b, nir_intrinsic_instr *intrin,
    nir_def *result = nir_extract_bits(b, chunks, num_chunks, 0,
                                       num_components, bit_size);
-   nir_def_rewrite_uses(&intrin->dest.ssa, result);
+   nir_def_rewrite_uses(&intrin->def, result);
    nir_instr_remove(&intrin->instr);
    return true;
+ nir_def *idx = nir_imm_intN_t(b, index, parent->def.bit_size); memcpy_store_deref_elem(b, parent, idx, value); } @@ -127,10 +127,10 @@ lower_memcpy_impl(nir_function_impl *impl) copy_type_for_byte_size(copy_size); nir_deref_instr *copy_dst = - nir_build_deref_cast(&b, &dst->dest.ssa, dst->modes, + nir_build_deref_cast(&b, &dst->def, dst->modes, copy_type, copy_size); nir_deref_instr *copy_src = - nir_build_deref_cast(&b, &src->dest.ssa, src->modes, + nir_build_deref_cast(&b, &src->def, src->modes, copy_type, copy_size); uint64_t index = offset / copy_size; @@ -147,10 +147,10 @@ lower_memcpy_impl(nir_function_impl *impl) * emit a loop which copies one byte at a time. */ nir_deref_instr *copy_dst = - nir_build_deref_cast(&b, &dst->dest.ssa, dst->modes, + nir_build_deref_cast(&b, &dst->def, dst->modes, glsl_uint8_t_type(), 1); nir_deref_instr *copy_src = - nir_build_deref_cast(&b, &src->dest.ssa, src->modes, + nir_build_deref_cast(&b, &src->def, src->modes, glsl_uint8_t_type(), 1); nir_variable *i = nir_local_variable_create(impl, diff --git a/src/compiler/nir/nir_lower_multiview.c b/src/compiler/nir/nir_lower_multiview.c index d24acdb..a4bcdae 100644 --- a/src/compiler/nir/nir_lower_multiview.c +++ b/src/compiler/nir/nir_lower_multiview.c @@ -275,7 +275,7 @@ nir_lower_multiview(nir_shader *shader, uint32_t view_mask) switch (intrin->intrinsic) { case nir_intrinsic_load_view_index: { - nir_def_rewrite_uses(&intrin->dest.ssa, view_index); + nir_def_rewrite_uses(&intrin->def, view_index); break; } @@ -285,7 +285,7 @@ nir_lower_multiview(nir_shader *shader, uint32_t view_mask) nir_deref_instr *old_deref = nir_src_as_deref(intrin->src[0]); nir_instr_rewrite_src(instr, &intrin->src[0], - nir_src_for_ssa(&pos_deref->dest.ssa)); + nir_src_for_ssa(&pos_deref->def)); /* Remove old deref since it has the wrong type. */ nir_deref_instr_remove_if_unused(old_deref); diff --git a/src/compiler/nir/nir_lower_non_uniform_access.c b/src/compiler/nir/nir_lower_non_uniform_access.c index 81dece3..41ad860 100644 --- a/src/compiler/nir/nir_lower_non_uniform_access.c +++ b/src/compiler/nir/nir_lower_non_uniform_access.c @@ -95,7 +95,7 @@ nu_handle_rewrite(nir_builder *b, struct nu_handle *h) /* Replicate the deref. 
*/ nir_deref_instr *deref = nir_build_deref_array(b, h->parent_deref, h->first); - *(h->src) = nir_src_for_ssa(&deref->dest.ssa); + *(h->src) = nir_src_for_ssa(&deref->def); } else { *(h->src) = nir_src_for_ssa(h->first); } diff --git a/src/compiler/nir/nir_lower_patch_vertices.c b/src/compiler/nir/nir_lower_patch_vertices.c index 01c6e24..eb94f68 100644 --- a/src/compiler/nir/nir_lower_patch_vertices.c +++ b/src/compiler/nir/nir_lower_patch_vertices.c @@ -84,7 +84,7 @@ nir_lower_patch_vertices(nir_shader *nir, } progress = true; - nir_def_rewrite_uses(&intr->dest.ssa, + nir_def_rewrite_uses(&intr->def, val); nir_instr_remove(instr); } diff --git a/src/compiler/nir/nir_lower_phis_to_scalar.c b/src/compiler/nir/nir_lower_phis_to_scalar.c index 5b5a69f..41b6c25 100644 --- a/src/compiler/nir/nir_lower_phis_to_scalar.c +++ b/src/compiler/nir/nir_lower_phis_to_scalar.c @@ -139,7 +139,7 @@ static bool should_lower_phi(nir_phi_instr *phi, struct lower_phis_to_scalar_state *state) { /* Already scalar */ - if (phi->dest.ssa.num_components == 1) + if (phi->def.num_components == 1) return false; if (state->lower_all) @@ -192,24 +192,24 @@ lower_phis_to_scalar_block(nir_block *block, if (!should_lower_phi(phi, state)) continue; - unsigned bit_size = phi->dest.ssa.bit_size; + unsigned bit_size = phi->def.bit_size; /* Create a vecN operation to combine the results. Most of these * will be redundant, but copy propagation should clean them up for * us. No need to add the complexity here. */ - nir_op vec_op = nir_op_vec(phi->dest.ssa.num_components); + nir_op vec_op = nir_op_vec(phi->def.num_components); nir_alu_instr *vec = nir_alu_instr_create(state->shader, vec_op); nir_def_init(&vec->instr, &vec->def, - phi->dest.ssa.num_components, bit_size); + phi->def.num_components, bit_size); - for (unsigned i = 0; i < phi->dest.ssa.num_components; i++) { + for (unsigned i = 0; i < phi->def.num_components; i++) { nir_phi_instr *new_phi = nir_phi_instr_create(state->shader); - nir_def_init(&new_phi->instr, &new_phi->dest.ssa, 1, - phi->dest.ssa.bit_size); + nir_def_init(&new_phi->instr, &new_phi->def, 1, + phi->def.bit_size); - vec->src[i].src = nir_src_for_ssa(&new_phi->dest.ssa); + vec->src[i].src = nir_src_for_ssa(&new_phi->def); nir_foreach_phi_src(src, phi) { /* We need to insert a mov to grab the i'th component of src */ @@ -234,7 +234,7 @@ lower_phis_to_scalar_block(nir_block *block, nir_instr_insert_after(&last_phi->instr, &vec->instr); - nir_def_rewrite_uses(&phi->dest.ssa, + nir_def_rewrite_uses(&phi->def, &vec->def); nir_instr_remove(&phi->instr); diff --git a/src/compiler/nir/nir_lower_pntc_ytransform.c b/src/compiler/nir/nir_lower_pntc_ytransform.c index 259ccc3..9ccd03d 100644 --- a/src/compiler/nir/nir_lower_pntc_ytransform.c +++ b/src/compiler/nir/nir_lower_pntc_ytransform.c @@ -60,7 +60,7 @@ lower_load_pointcoord(lower_pntc_ytransform_state *state, nir_builder *b = &state->b; b->cursor = nir_after_instr(&intr->instr); - nir_def *pntc = &intr->dest.ssa; + nir_def *pntc = &intr->def; nir_def *transform = get_pntc_transform(state); nir_def *y = nir_channel(b, pntc, 1); /* The offset is 1 if we're flipping, 0 otherwise. 
*/ @@ -73,7 +73,7 @@ lower_load_pointcoord(lower_pntc_ytransform_state *state, nir_channel(b, pntc, 0), nir_fadd(b, offset, scaled)); - nir_def_rewrite_uses_after(&intr->dest.ssa, flipped_pntc, + nir_def_rewrite_uses_after(&intr->def, flipped_pntc, flipped_pntc->parent_instr); } diff --git a/src/compiler/nir/nir_lower_printf.c b/src/compiler/nir/nir_lower_printf.c index 65ea6ed..7b86eea 100644 --- a/src/compiler/nir/nir_lower_printf.c +++ b/src/compiler/nir/nir_lower_printf.c @@ -62,12 +62,12 @@ lower_printf_instr(nir_builder *b, nir_instr *instr, void *_options) /* Increment the counter at the beginning of the buffer */ const unsigned counter_size = 4; nir_deref_instr *counter = nir_build_deref_array_imm(b, buffer, 0); - counter = nir_build_deref_cast(b, &counter->dest.ssa, + counter = nir_build_deref_cast(b, &counter->def, nir_var_mem_global, glsl_uint_type(), 0); counter->cast.align_mul = 4; nir_def *offset = - nir_deref_atomic(b, 32, &counter->dest.ssa, + nir_deref_atomic(b, 32, &counter->def, nir_imm_int(b, fmt_str_id_size + args_size), .atomic_op = nir_atomic_op_iadd); @@ -85,7 +85,7 @@ lower_printf_instr(nir_builder *b, nir_instr *instr, void *_options) nir_i2iN(b, offset, ptr_bit_size); nir_deref_instr *fmt_str_id_deref = nir_build_deref_array(b, buffer, fmt_str_id_offset); - fmt_str_id_deref = nir_build_deref_cast(b, &fmt_str_id_deref->dest.ssa, + fmt_str_id_deref = nir_build_deref_cast(b, &fmt_str_id_deref->def, nir_var_mem_global, glsl_uint_type(), 0); fmt_str_id_deref->cast.align_mul = 4; @@ -115,7 +115,7 @@ lower_printf_instr(nir_builder *b, nir_instr *instr, void *_options) ptr_bit_size); nir_deref_instr *dst_arg_deref = nir_build_deref_array(b, buffer, arg_offset); - dst_arg_deref = nir_build_deref_cast(b, &dst_arg_deref->dest.ssa, + dst_arg_deref = nir_build_deref_cast(b, &dst_arg_deref->def, nir_var_mem_global, arg_type, 0); assert(field_offset % 4 == 0); dst_arg_deref->cast.align_mul = 4; @@ -127,7 +127,7 @@ lower_printf_instr(nir_builder *b, nir_instr *instr, void *_options) nir_pop_if(b, NULL); nir_def *ret_val = nir_if_phi(b, printf_succ_val, printf_fail_val); - nir_def_rewrite_uses(&prntf->dest.ssa, ret_val); + nir_def_rewrite_uses(&prntf->def, ret_val); nir_instr_remove(&prntf->instr); return true; diff --git a/src/compiler/nir/nir_lower_readonly_images_to_tex.c b/src/compiler/nir/nir_lower_readonly_images_to_tex.c index ce9ddb6..43a1271 100644 --- a/src/compiler/nir/nir_lower_readonly_images_to_tex.c +++ b/src/compiler/nir/nir_lower_readonly_images_to_tex.c @@ -128,7 +128,7 @@ lower_readonly_image_instr_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin coord_components++; tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_deref, - &deref->dest.ssa); + &deref->def); if (options->per_variable) { assert(nir_deref_instr_get_variable(deref)); @@ -148,7 +148,7 @@ lower_readonly_image_instr_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin assert(num_srcs == 3); tex->dest_type = nir_intrinsic_dest_type(intrin); - nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32); + nir_def_init(&tex->instr, &tex->def, 4, 32); break; } @@ -159,7 +159,7 @@ lower_readonly_image_instr_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin assert(num_srcs == 2); tex->dest_type = nir_type_uint32; - nir_def_init(&tex->instr, &tex->dest.ssa, coord_components, 32); + nir_def_init(&tex->instr, &tex->def, coord_components, 32); break; } @@ -169,10 +169,10 @@ lower_readonly_image_instr_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin nir_builder_instr_insert(b, &tex->instr); - nir_def 
*res = nir_trim_vector(b, &tex->dest.ssa, - intrin->dest.ssa.num_components); + nir_def *res = nir_trim_vector(b, &tex->def, + intrin->def.num_components); - nir_def_rewrite_uses(&intrin->dest.ssa, res); + nir_def_rewrite_uses(&intrin->def, res); nir_instr_remove(&intrin->instr); return true; diff --git a/src/compiler/nir/nir_lower_reg_intrinsics_to_ssa.c b/src/compiler/nir/nir_lower_reg_intrinsics_to_ssa.c index 980b8a6..8337194 100644 --- a/src/compiler/nir/nir_lower_reg_intrinsics_to_ssa.c +++ b/src/compiler/nir/nir_lower_reg_intrinsics_to_ssa.c @@ -36,7 +36,7 @@ struct regs_to_ssa_state { static void setup_reg(nir_intrinsic_instr *decl, struct regs_to_ssa_state *state) { - assert(state->values[decl->dest.ssa.index] == NULL); + assert(state->values[decl->def.index] == NULL); if (!should_lower_reg(decl)) return; @@ -48,7 +48,7 @@ setup_reg(nir_intrinsic_instr *decl, struct regs_to_ssa_state *state) nir_foreach_reg_store(store, decl) BITSET_SET(state->defs, store->parent_instr->block->index); - state->values[decl->dest.ssa.index] = + state->values[decl->def.index] = nir_phi_builder_add_value(state->phi_builder, num_components, bit_size, state->defs); } @@ -66,10 +66,10 @@ rewrite_load(nir_intrinsic_instr *load, struct regs_to_ssa_state *state) nir_intrinsic_instr *decl = nir_instr_as_intrinsic(reg->parent_instr); nir_def *def = nir_phi_builder_value_get_block_def(value, block); - nir_def_rewrite_uses(&load->dest.ssa, def); + nir_def_rewrite_uses(&load->def, def); nir_instr_remove(&load->instr); - if (nir_def_is_unused(&decl->dest.ssa)) + if (nir_def_is_unused(&decl->def)) nir_instr_remove(&decl->instr); } @@ -109,7 +109,7 @@ rewrite_store(nir_intrinsic_instr *store, struct regs_to_ssa_state *state) nir_phi_builder_value_set_block_def(value, block, new_value); nir_instr_remove(&store->instr); - if (nir_def_is_unused(&decl->dest.ssa)) + if (nir_def_is_unused(&decl->def)) nir_instr_remove(&decl->instr); } diff --git a/src/compiler/nir/nir_lower_robust_access.c b/src/compiler/nir/nir_lower_robust_access.c index 2cf291e..d808711 100644 --- a/src/compiler/nir/nir_lower_robust_access.c +++ b/src/compiler/nir/nir_lower_robust_access.c @@ -37,8 +37,8 @@ wrap_in_if(nir_builder *b, nir_intrinsic_instr *instr, nir_def *valid) nir_def *res, *zero; if (has_dest) { - zero = nir_imm_zero(b, instr->dest.ssa.num_components, - instr->dest.ssa.bit_size); + zero = nir_imm_zero(b, instr->def.num_components, + instr->def.bit_size); } nir_push_if(b, valid); @@ -47,12 +47,12 @@ wrap_in_if(nir_builder *b, nir_intrinsic_instr *instr, nir_def *valid) nir_builder_instr_insert(b, orig); if (has_dest) - res = &nir_instr_as_intrinsic(orig)->dest.ssa; + res = &nir_instr_as_intrinsic(orig)->def; } nir_pop_if(b, NULL); if (has_dest) - nir_def_rewrite_uses(&instr->dest.ssa, nir_if_phi(b, res, zero)); + nir_def_rewrite_uses(&instr->def, nir_if_phi(b, res, zero)); /* We've cloned and wrapped, so drop original instruction */ nir_instr_remove(&instr->instr); @@ -63,7 +63,7 @@ lower_buffer_load(nir_builder *b, nir_intrinsic_instr *instr, const nir_lower_robust_access_options *opts) { - uint32_t type_sz = instr->dest.ssa.bit_size / 8; + uint32_t type_sz = instr->def.bit_size / 8; nir_def *size; nir_def *index = instr->src[0].ssa; @@ -96,7 +96,7 @@ lower_buffer_shared(nir_builder *b, nir_intrinsic_instr *instr) uint32_t type_sz, offset_src; if (instr->intrinsic == nir_intrinsic_load_shared) { offset_src = 0; - type_sz = instr->dest.ssa.bit_size / 8; + type_sz = instr->def.bit_size / 8; } else if (instr->intrinsic == 
nir_intrinsic_store_shared) { offset_src = 1; type_sz = nir_src_bit_size(instr->src[0]) / 8; diff --git a/src/compiler/nir/nir_lower_scratch.c b/src/compiler/nir/nir_lower_scratch.c index ac0a623..0ac449d 100644 --- a/src/compiler/nir/nir_lower_scratch.c +++ b/src/compiler/nir/nir_lower_scratch.c @@ -48,13 +48,13 @@ lower_load_store(nir_builder *b, size_align(deref->type, &size, &align); if (intrin->intrinsic == nir_intrinsic_load_deref) { - unsigned bit_size = intrin->dest.ssa.bit_size; + unsigned bit_size = intrin->def.bit_size; nir_def *value = nir_load_scratch( b, intrin->num_components, bit_size == 1 ? 32 : bit_size, offset, .align_mul = align); if (bit_size == 1) value = nir_b2b1(b, value); - nir_def_rewrite_uses(&intrin->dest.ssa, value); + nir_def_rewrite_uses(&intrin->def, value); } else { assert(intrin->intrinsic == nir_intrinsic_store_deref); @@ -73,7 +73,7 @@ lower_load_store(nir_builder *b, static bool only_used_for_load_store(nir_deref_instr *deref) { - nir_foreach_use(src, &deref->dest.ssa) { + nir_foreach_use(src, &deref->def) { if (!src->parent_instr) return false; if (src->parent_instr->type == nir_instr_type_deref) { diff --git a/src/compiler/nir/nir_lower_shader_calls.c b/src/compiler/nir/nir_lower_shader_calls.c index a120dee..ca5ad40 100644 --- a/src/compiler/nir/nir_lower_shader_calls.c +++ b/src/compiler/nir/nir_lower_shader_calls.c @@ -911,7 +911,7 @@ rewrite_phis_to_pred(nir_block *block, nir_block *pred) nir_foreach_phi_src(phi_src, phi) { if (phi_src->pred == pred) { found = true; - nir_def_rewrite_uses(&phi->dest.ssa, phi_src->src.ssa); + nir_def_rewrite_uses(&phi->def, phi_src->src.ssa); break; } } @@ -1350,8 +1350,8 @@ lower_stack_instr_to_scratch(struct nir_builder *b, nir_instr *instr, void *data nir_intrinsic_base(stack)); data = nir_load_global(b, addr, nir_intrinsic_align_mul(stack), - stack->dest.ssa.num_components, - stack->dest.ssa.bit_size); + stack->def.num_components, + stack->def.bit_size); } else { assert(state->address_format == nir_address_format_32bit_offset); data = nir_load_scratch(b, @@ -1579,7 +1579,7 @@ nir_opt_trim_stack_values(nir_shader *shader) } } - intrin->dest.ssa.num_components = intrin->num_components = swiz_count; + intrin->def.num_components = intrin->num_components = swiz_count; progress = true; } @@ -1817,7 +1817,7 @@ nir_opt_stack_loads(nir_shader *shader) if (intrin->intrinsic != nir_intrinsic_load_stack) continue; - nir_def *value = &intrin->dest.ssa; + nir_def *value = &intrin->def; nir_block *new_block = find_last_dominant_use_block(impl, value); if (new_block == block) continue; @@ -1854,7 +1854,7 @@ split_stack_components_instr(struct nir_builder *b, nir_instr *instr, void *data return false; if (intrin->intrinsic == nir_intrinsic_load_stack && - intrin->dest.ssa.num_components == 1) + intrin->def.num_components == 1) return false; if (intrin->intrinsic == nir_intrinsic_store_stack && @@ -1867,18 +1867,18 @@ split_stack_components_instr(struct nir_builder *b, nir_instr *instr, void *data nir_def *components[NIR_MAX_VEC_COMPONENTS] = { 0, }; - for (unsigned c = 0; c < intrin->dest.ssa.num_components; c++) { - components[c] = nir_load_stack(b, 1, intrin->dest.ssa.bit_size, + for (unsigned c = 0; c < intrin->def.num_components; c++) { + components[c] = nir_load_stack(b, 1, intrin->def.bit_size, .base = nir_intrinsic_base(intrin) + - c * intrin->dest.ssa.bit_size / 8, + c * intrin->def.bit_size / 8, .call_idx = nir_intrinsic_call_idx(intrin), .value_id = nir_intrinsic_value_id(intrin), .align_mul = 
diff --git a/src/compiler/nir/nir_lower_shader_calls.c b/src/compiler/nir/nir_lower_shader_calls.c
index a120dee..ca5ad40 100644
--- a/src/compiler/nir/nir_lower_shader_calls.c
+++ b/src/compiler/nir/nir_lower_shader_calls.c
@@ -911,7 +911,7 @@ rewrite_phis_to_pred(nir_block *block, nir_block *pred)
       nir_foreach_phi_src(phi_src, phi) {
          if (phi_src->pred == pred) {
             found = true;
-            nir_def_rewrite_uses(&phi->dest.ssa, phi_src->src.ssa);
+            nir_def_rewrite_uses(&phi->def, phi_src->src.ssa);
             break;
          }
       }
@@ -1350,8 +1350,8 @@ lower_stack_instr_to_scratch(struct nir_builder *b, nir_instr *instr, void *data
                               nir_intrinsic_base(stack));
          data = nir_load_global(b, addr,
                                 nir_intrinsic_align_mul(stack),
-                                stack->dest.ssa.num_components,
-                                stack->dest.ssa.bit_size);
+                                stack->def.num_components,
+                                stack->def.bit_size);
       } else {
          assert(state->address_format == nir_address_format_32bit_offset);
          data = nir_load_scratch(b,
@@ -1579,7 +1579,7 @@ nir_opt_trim_stack_values(nir_shader *shader)
          }
       }
 
-      intrin->dest.ssa.num_components = intrin->num_components = swiz_count;
+      intrin->def.num_components = intrin->num_components = swiz_count;
 
       progress = true;
    }
@@ -1817,7 +1817,7 @@ nir_opt_stack_loads(nir_shader *shader)
            if (intrin->intrinsic != nir_intrinsic_load_stack)
               continue;
 
-           nir_def *value = &intrin->dest.ssa;
+           nir_def *value = &intrin->def;
            nir_block *new_block = find_last_dominant_use_block(impl, value);
            if (new_block == block)
               continue;
@@ -1854,7 +1854,7 @@ split_stack_components_instr(struct nir_builder *b, nir_instr *instr, void *data
       return false;
 
    if (intrin->intrinsic == nir_intrinsic_load_stack &&
-       intrin->dest.ssa.num_components == 1)
+       intrin->def.num_components == 1)
       return false;
 
    if (intrin->intrinsic == nir_intrinsic_store_stack &&
@@ -1867,18 +1867,18 @@ split_stack_components_instr(struct nir_builder *b, nir_instr *instr, void *data
       nir_def *components[NIR_MAX_VEC_COMPONENTS] = {
         0,
      };
-      for (unsigned c = 0; c < intrin->dest.ssa.num_components; c++) {
-         components[c] = nir_load_stack(b, 1, intrin->dest.ssa.bit_size,
+      for (unsigned c = 0; c < intrin->def.num_components; c++) {
+         components[c] = nir_load_stack(b, 1, intrin->def.bit_size,
                                         .base = nir_intrinsic_base(intrin) +
-                                                c * intrin->dest.ssa.bit_size / 8,
+                                                c * intrin->def.bit_size / 8,
                                         .call_idx = nir_intrinsic_call_idx(intrin),
                                         .value_id = nir_intrinsic_value_id(intrin),
                                         .align_mul = nir_intrinsic_align_mul(intrin));
       }
 
-      nir_def_rewrite_uses(&intrin->dest.ssa,
+      nir_def_rewrite_uses(&intrin->def,
                            nir_vec(b, components,
-                                   intrin->dest.ssa.num_components));
+                                   intrin->def.num_components));
    } else {
       assert(intrin->intrinsic == nir_intrinsic_store_stack);
       for (unsigned c = 0; c < intrin->src[0].ssa->num_components; c++) {
diff --git a/src/compiler/nir/nir_lower_single_sampled.c b/src/compiler/nir/nir_lower_single_sampled.c
index c87624e..c0f5500 100644
--- a/src/compiler/nir/nir_lower_single_sampled.c
+++ b/src/compiler/nir/nir_lower_single_sampled.c
@@ -83,7 +83,7 @@ lower_single_sampled_instr(nir_builder *b,
       return false;
    }
 
-   nir_def_rewrite_uses(&intrin->dest.ssa, lowered);
+   nir_def_rewrite_uses(&intrin->def, lowered);
    nir_instr_remove(instr);
    return true;
 }
diff --git a/src/compiler/nir/nir_lower_ssbo.c b/src/compiler/nir/nir_lower_ssbo.c
index ad9550c..820634c 100644
--- a/src/compiler/nir/nir_lower_ssbo.c
+++ b/src/compiler/nir/nir_lower_ssbo.c
@@ -66,9 +66,9 @@ nir_load_ssbo_prop(nir_builder *b, nir_intrinsic_op op,
    nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);
    load->num_components = 1;
    nir_src_copy(&load->src[0], idx, &load->instr);
-   nir_def_init(&load->instr, &load->dest.ssa, 1, bitsize);
+   nir_def_init(&load->instr, &load->def, 1, bitsize);
    nir_builder_instr_insert(b, &load->instr);
-   return &load->dest.ssa;
+   return &load->def;
 }
 
 #define nir_ssbo_prop(b, prop, index, bitsize) \
@@ -115,8 +115,8 @@ lower_ssbo_instr(nir_builder *b, nir_intrinsic_instr *intr)
       nir_src_copy(&global->src[0], &intr->src[0], &global->instr);
       nir_intrinsic_set_write_mask(global, nir_intrinsic_write_mask(intr));
    } else {
-      nir_def_init(&global->instr, &global->dest.ssa,
-                   intr->dest.ssa.num_components, intr->dest.ssa.bit_size);
+      nir_def_init(&global->instr, &global->def,
+                   intr->def.num_components, intr->def.bit_size);
 
       if (is_atomic) {
          nir_src_copy(&global->src[1], &intr->src[2], &global->instr);
@@ -126,7 +126,7 @@ lower_ssbo_instr(nir_builder *b, nir_intrinsic_instr *intr)
    }
 
    nir_builder_instr_insert(b, &global->instr);
-   return is_store ? NULL : &global->dest.ssa;
+   return is_store ? NULL : &global->def;
 }
 
 static bool
@@ -169,7 +169,7 @@ nir_lower_ssbo(nir_shader *shader)
          nir_def *replace = lower_ssbo_instr(&b, intr);
 
          if (replace) {
-            nir_def_rewrite_uses(&intr->dest.ssa,
+            nir_def_rewrite_uses(&intr->def,
                                  replace);
          }
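nir_load_ssbo_prop above is the hand-rolled construction case, which is where the rename is most visible: the def is initialized directly on the new instruction and returned as the result. Before and after, reduced to the changed lines (load and bitsize as in the function above):

   /* Before this commit: */
   nir_def_init(&load->instr, &load->dest.ssa, 1, bitsize);
   return &load->dest.ssa;

   /* After: the nir_dest wrapper is gone. */
   nir_def_init(&load->instr, &load->def, 1, bitsize);
   return &load->def;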
diff --git a/src/compiler/nir/nir_lower_subgroups.c b/src/compiler/nir/nir_lower_subgroups.c
index d71fdd3..657be6e 100644
--- a/src/compiler/nir/nir_lower_subgroups.c
+++ b/src/compiler/nir/nir_lower_subgroups.c
@@ -40,7 +40,7 @@ lower_subgroups_64bit_split_intrinsic(nir_builder *b, nir_intrinsic_instr *intri
       comp = nir_unpack_64_2x32_split_y(b, intrin->src[0].ssa);
 
    nir_intrinsic_instr *intr = nir_intrinsic_instr_create(b->shader, intrin->intrinsic);
-   nir_def_init(&intr->instr, &intr->dest.ssa, 1, 32);
+   nir_def_init(&intr->instr, &intr->def, 1, 32);
    intr->const_index[0] = intrin->const_index[0];
    intr->const_index[1] = intrin->const_index[1];
    intr->src[0] = nir_src_for_ssa(comp);
@@ -58,7 +58,7 @@ lower_subgroup_op_to_32bit(nir_builder *b, nir_intrinsic_instr *intrin)
    assert(intrin->src[0].ssa->bit_size == 64);
    nir_intrinsic_instr *intr_x = lower_subgroups_64bit_split_intrinsic(b, intrin, 0);
    nir_intrinsic_instr *intr_y = lower_subgroups_64bit_split_intrinsic(b, intrin, 1);
-   return nir_pack_64_2x32_split(b, &intr_x->dest.ssa, &intr_y->dest.ssa);
+   return nir_pack_64_2x32_split(b, &intr_x->def, &intr_y->def);
 }
 
 static nir_def *
@@ -108,7 +108,7 @@ lower_subgroup_op_to_scalar(nir_builder *b, nir_intrinsic_instr *intrin,
                             bool lower_to_32bit)
 {
    /* This is safe to call on scalar things but it would be silly */
-   assert(intrin->dest.ssa.num_components > 1);
+   assert(intrin->def.num_components > 1);
 
    nir_def *value = nir_ssa_for_src(b, intrin->src[0],
                                     intrin->num_components);
@@ -117,8 +117,8 @@ lower_subgroup_op_to_scalar(nir_builder *b, nir_intrinsic_instr *intrin,
    for (unsigned i = 0; i < intrin->num_components; i++) {
       nir_intrinsic_instr *chan_intrin =
         nir_intrinsic_instr_create(b->shader, intrin->intrinsic);
-      nir_def_init(&chan_intrin->instr, &chan_intrin->dest.ssa, 1,
-                   intrin->dest.ssa.bit_size);
+      nir_def_init(&chan_intrin->instr, &chan_intrin->def, 1,
+                   intrin->def.bit_size);
       chan_intrin->num_components = 1;
 
       /* value */
@@ -136,7 +136,7 @@ lower_subgroup_op_to_scalar(nir_builder *b, nir_intrinsic_instr *intrin,
          reads[i] = lower_subgroup_op_to_32bit(b, chan_intrin);
       } else {
          nir_builder_instr_insert(b, &chan_intrin->instr);
-         reads[i] = &chan_intrin->dest.ssa;
+         reads[i] = &chan_intrin->def;
       }
    }
 
@@ -152,16 +152,16 @@ lower_vote_eq_to_scalar(nir_builder *b, nir_intrinsic_instr *intrin)
    for (unsigned i = 0; i < intrin->num_components; i++) {
       nir_intrinsic_instr *chan_intrin =
         nir_intrinsic_instr_create(b->shader, intrin->intrinsic);
-      nir_def_init(&chan_intrin->instr, &chan_intrin->dest.ssa, 1,
-                   intrin->dest.ssa.bit_size);
+      nir_def_init(&chan_intrin->instr, &chan_intrin->def, 1,
+                   intrin->def.bit_size);
       chan_intrin->num_components = 1;
       chan_intrin->src[0] = nir_src_for_ssa(nir_channel(b, value, i));
 
       nir_builder_instr_insert(b, &chan_intrin->instr);
 
       if (result) {
-         result = nir_iand(b, result, &chan_intrin->dest.ssa);
+         result = nir_iand(b, result, &chan_intrin->def);
       } else {
-         result = &chan_intrin->dest.ssa;
+         result = &chan_intrin->def;
       }
    }
 
@@ -209,8 +209,8 @@ lower_shuffle_to_swizzle(nir_builder *b, nir_intrinsic_instr *intrin,
    swizzle->num_components = intrin->num_components;
    nir_src_copy(&swizzle->src[0], &intrin->src[0], &swizzle->instr);
    nir_intrinsic_set_swizzle_mask(swizzle, (mask << 10) | 0x1f);
-   nir_def_init(&swizzle->instr, &swizzle->dest.ssa,
-                intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size);
+   nir_def_init(&swizzle->instr, &swizzle->def,
+                intrin->def.num_components, intrin->def.bit_size);
 
    if (options->lower_to_scalar && swizzle->num_components > 1) {
       return lower_subgroup_op_to_scalar(b, swizzle, options->lower_shuffle_to_32bit);
@@ -218,7 +218,7 @@ lower_shuffle_to_swizzle(nir_builder *b, nir_intrinsic_instr *intrin,
       return lower_subgroup_op_to_32bit(b, swizzle);
    } else {
       nir_builder_instr_insert(b, &swizzle->instr);
-      return &swizzle->dest.ssa;
+      return &swizzle->def;
    }
 }
 
@@ -299,8 +299,8 @@ lower_to_shuffle(nir_builder *b, nir_intrinsic_instr *intrin,
    shuffle->num_components = intrin->num_components;
    nir_src_copy(&shuffle->src[0], &intrin->src[0], &shuffle->instr);
    shuffle->src[1] = nir_src_for_ssa(index);
-   nir_def_init(&shuffle->instr, &shuffle->dest.ssa,
-                intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size);
+   nir_def_init(&shuffle->instr, &shuffle->def,
+                intrin->def.num_components, intrin->def.bit_size);
 
    bool lower_to_32bit = options->lower_shuffle_to_32bit && is_shuffle;
    if (options->lower_to_scalar && shuffle->num_components > 1) {
@@ -309,7 +309,7 @@ lower_to_shuffle(nir_builder *b, nir_intrinsic_instr *intrin,
       return lower_subgroup_op_to_32bit(b, shuffle);
    } else {
       nir_builder_instr_insert(b, &shuffle->instr);
-      return &shuffle->dest.ssa;
+      return &shuffle->def;
    }
 }
 
@@ -582,8 +582,8 @@ lower_dynamic_quad_broadcast(nir_builder *b, nir_intrinsic_instr *intrin,
       qbcst->num_components = intrin->num_components;
       qbcst->src[1] = nir_src_for_ssa(nir_imm_int(b, i));
       nir_src_copy(&qbcst->src[0], &intrin->src[0], &qbcst->instr);
-      nir_def_init(&qbcst->instr, &qbcst->dest.ssa,
-                   intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size);
+      nir_def_init(&qbcst->instr, &qbcst->def,
+                   intrin->def.num_components, intrin->def.bit_size);
 
       nir_def *qbcst_dst = NULL;
 
@@ -591,7 +591,7 @@ lower_dynamic_quad_broadcast(nir_builder *b, nir_intrinsic_instr *intrin,
          qbcst_dst = lower_subgroup_op_to_scalar(b, qbcst, false);
       } else {
          nir_builder_instr_insert(b, &qbcst->instr);
-         qbcst_dst = &qbcst->dest.ssa;
+         qbcst_dst = &qbcst->def;
       }
 
       if (i)
@@ -607,7 +607,7 @@ lower_dynamic_quad_broadcast(nir_builder *b, nir_intrinsic_instr *intrin,
 static nir_def *
 lower_read_invocation_to_cond(nir_builder *b, nir_intrinsic_instr *intrin)
 {
-   return nir_read_invocation_cond_ir3(b, intrin->dest.ssa.bit_size,
+   return nir_read_invocation_cond_ir3(b, intrin->def.bit_size,
                                        intrin->src[0].ssa,
                                        nir_ieq(b, intrin->src[1].ssa,
                                                nir_load_subgroup_invocation(b)));
@@ -689,13 +689,13 @@ lower_subgroups_instr(nir_builder *b, nir_instr *instr, void *_options)
       }
 
       return uint_to_ballot_type(b, val,
-                                 intrin->dest.ssa.num_components,
-                                 intrin->dest.ssa.bit_size);
+                                 intrin->def.num_components,
+                                 intrin->def.bit_size);
    }
 
    case nir_intrinsic_ballot: {
-      if (intrin->dest.ssa.num_components == options->ballot_components &&
-          intrin->dest.ssa.bit_size == options->ballot_bit_size)
+      if (intrin->def.num_components == options->ballot_components &&
+          intrin->def.bit_size == options->ballot_bit_size)
         return NULL;
 
      nir_def *ballot =
@@ -703,8 +703,8 @@ lower_subgroups_instr(nir_builder *b, nir_instr *instr, void *_options)
                           intrin->src[0].ssa);
 
      return uint_to_ballot_type(b, ballot,
-                                intrin->dest.ssa.num_components,
-                                intrin->dest.ssa.bit_size);
+                                intrin->def.num_components,
+                                intrin->def.bit_size);
    }
 
    case nir_intrinsic_ballot_bitfield_extract:
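lower_subgroup_op_to_scalar above shows the per-channel splitting pattern: clone the intrinsic once per component, feed each clone one channel, and recombine. Its core loop, stripped of the 32-bit special case (value is the vector source; reads is a nir_def *[NIR_MAX_VEC_COMPONENTS]):

   for (unsigned i = 0; i < intrin->num_components; i++) {
      nir_intrinsic_instr *chan =
         nir_intrinsic_instr_create(b->shader, intrin->intrinsic);
      nir_def_init(&chan->instr, &chan->def, 1, intrin->def.bit_size);
      chan->num_components = 1;
      /* Feed channel i of the source through the scalar clone. */
      chan->src[0] = nir_src_for_ssa(nir_channel(b, value, i));
      nir_builder_instr_insert(b, &chan->instr);
      reads[i] = &chan->def;
   }
   return nir_vec(b, reads, intrin->num_components);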
diff --git a/src/compiler/nir/nir_lower_system_values.c b/src/compiler/nir/nir_lower_system_values.c
index c018be2..8301284 100644
--- a/src/compiler/nir/nir_lower_system_values.c
+++ b/src/compiler/nir/nir_lower_system_values.c
@@ -42,12 +42,12 @@ struct lower_sysval_state {
 static nir_def *
 sanitize_32bit_sysval(nir_builder *b, nir_intrinsic_instr *intrin)
 {
-   const unsigned bit_size = intrin->dest.ssa.bit_size;
+   const unsigned bit_size = intrin->def.bit_size;
    if (bit_size == 32)
       return NULL;
 
-   intrin->dest.ssa.bit_size = 32;
-   return nir_u2uN(b, &intrin->dest.ssa, bit_size);
+   intrin->def.bit_size = 32;
+   return nir_u2uN(b, &intrin->def, bit_size);
 }
 
 static nir_def *
@@ -74,7 +74,7 @@ lower_system_value_instr(nir_builder *b, nir_instr *instr, void *_state)
    if (!nir_intrinsic_infos[intrin->intrinsic].has_dest)
       return NULL;
 
-   const unsigned bit_size = intrin->dest.ssa.bit_size;
+   const unsigned bit_size = intrin->def.bit_size;
 
    switch (intrin->intrinsic) {
    case nir_intrinsic_load_vertex_id:
@@ -201,10 +201,10 @@ lower_system_value_instr(nir_builder *b, nir_instr *instr, void *_state)
       nir_intrinsic_op op = nir_intrinsic_from_system_value(var->data.location);
       nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);
 
-      nir_def_init_for_type(&load->instr, &load->dest.ssa, var->type);
-      load->num_components = load->dest.ssa.num_components;
+      nir_def_init_for_type(&load->instr, &load->def, var->type);
+      load->num_components = load->def.num_components;
 
       nir_builder_instr_insert(b, &load->instr);
-      return &load->dest.ssa;
+      return &load->def;
    }
 
    case SYSTEM_VALUE_DEVICE_INDEX:
@@ -274,9 +274,9 @@ lower_system_value_instr(nir_builder *b, nir_instr *instr, void *_state)
       }
 
    case SYSTEM_VALUE_MESH_VIEW_INDICES:
-      return nir_load_mesh_view_indices(b, intrin->dest.ssa.num_components,
+      return nir_load_mesh_view_indices(b, intrin->def.num_components,
                                         bit_size, column, .base = 0,
-                                        .range = intrin->dest.ssa.num_components * bit_size / 8);
+                                        .range = intrin->def.num_components * bit_size / 8);
 
    default:
       break;
@@ -288,33 +288,33 @@ lower_system_value_instr(nir_builder *b, nir_instr *instr, void *_state)
       assert(nir_intrinsic_infos[sysval_op].index_map[NIR_INTRINSIC_COLUMN] > 0);
       unsigned num_cols = glsl_get_matrix_columns(var->type);
       ASSERTED unsigned num_rows = glsl_get_vector_elements(var->type);
-      assert(num_rows == intrin->dest.ssa.num_components);
+      assert(num_rows == intrin->def.num_components);
 
       nir_def *cols[4];
       for (unsigned i = 0; i < num_cols; i++) {
          cols[i] = nir_load_system_value(b, sysval_op, i,
-                                         intrin->dest.ssa.num_components,
-                                         intrin->dest.ssa.bit_size);
+                                         intrin->def.num_components,
+                                         intrin->def.bit_size);
          assert(cols[i]->num_components == num_rows);
       }
       return nir_select_from_ssa_def_array(b, cols, num_cols, column);
    } else if (glsl_type_is_array(var->type)) {
       unsigned num_elems = glsl_get_length(var->type);
       ASSERTED const struct glsl_type *elem_type = glsl_get_array_element(var->type);
-      assert(glsl_get_components(elem_type) == intrin->dest.ssa.num_components);
+      assert(glsl_get_components(elem_type) == intrin->def.num_components);
 
       nir_def *elems[4];
       assert(ARRAY_SIZE(elems) >= num_elems);
       for (unsigned i = 0; i < num_elems; i++) {
        elems[i] = nir_load_system_value(b, sysval_op, i,
-                                         intrin->dest.ssa.num_components,
-                                         intrin->dest.ssa.bit_size);
+                                         intrin->def.num_components,
+                                         intrin->def.bit_size);
       }
       return nir_select_from_ssa_def_array(b, elems, num_elems, column);
    } else {
       return nir_load_system_value(b, sysval_op, 0,
-                                   intrin->dest.ssa.num_components,
-                                   intrin->dest.ssa.bit_size);
+                                   intrin->def.num_components,
+                                   intrin->def.bit_size);
    }
 }
@@ -492,7 +492,7 @@ lower_compute_system_value_instr(nir_builder *b,
    if (!nir_intrinsic_infos[intrin->intrinsic].has_dest)
       return NULL;
 
-   const unsigned bit_size = intrin->dest.ssa.bit_size;
+   const unsigned bit_size = intrin->def.bit_size;
 
    switch (intrin->intrinsic) {
    case nir_intrinsic_load_local_invocation_id:
@@ -604,7 +604,7 @@ lower_compute_system_value_instr(nir_builder *b,
       if (!b->shader->info.workgroup_size_variable && is_zero) {
          nir_scalar defs[3];
          for (unsigned i = 0; i < 3; i++) {
-            defs[i] = is_zero & (1 << i) ? nir_get_ssa_scalar(nir_imm_zero(b, 1, 32), 0) : nir_get_ssa_scalar(&intrin->dest.ssa, i);
+            defs[i] = is_zero & (1 << i) ? nir_get_ssa_scalar(nir_imm_zero(b, 1, 32), 0) : nir_get_ssa_scalar(&intrin->def, i);
          }
          return nir_vec_scalars(b, defs, 3);
       }
@@ -740,7 +740,7 @@ lower_compute_system_value_instr(nir_builder *b,
 
       b->cursor = nir_after_instr(instr);
 
-      nir_def *num_wgs = &intrin->dest.ssa;
+      nir_def *num_wgs = &intrin->def;
       for (unsigned i = 0; i < 3; ++i) {
          if (num_wgs_imm[i])
            num_wgs = nir_vector_insert_imm(b, num_wgs, nir_imm_int(b, num_wgs_imm[i]), i);
diff --git a/src/compiler/nir/nir_lower_task_shader.c b/src/compiler/nir/nir_lower_task_shader.c
index 1b4ef78..5a09b3d 100644
--- a/src/compiler/nir/nir_lower_task_shader.c
+++ b/src/compiler/nir/nir_lower_task_shader.c
@@ -58,7 +58,7 @@ lower_nv_task_output(nir_builder *b,
       nir_def *load =
         nir_load_shared(b, 1, 32, nir_imm_int(b, 0),
                         .base = s->task_count_shared_addr);
-      nir_def_rewrite_uses(&intrin->dest.ssa, load);
+      nir_def_rewrite_uses(&intrin->def, load);
       nir_instr_remove(instr);
       return true;
    }
@@ -376,7 +376,7 @@ requires_payload_in_shared(nir_shader *shader, bool atomics, bool small_types)
                return true;
             break;
          case nir_intrinsic_load_task_payload:
-            if (small_types && intrin->dest.ssa.bit_size < 32)
+            if (small_types && intrin->def.bit_size < 32)
                return true;
             break;
          case nir_intrinsic_store_task_payload:
diff --git a/src/compiler/nir/nir_lower_tess_coord_z.c b/src/compiler/nir/nir_lower_tess_coord_z.c
index a215a35..aec4165 100644
--- a/src/compiler/nir/nir_lower_tess_coord_z.c
+++ b/src/compiler/nir/nir_lower_tess_coord_z.c
@@ -30,7 +30,7 @@ lower_tess_coord_z(nir_builder *b, nir_instr *instr, void *state)
    else
       z = nir_imm_float(b, 0.0f);
 
-   nir_def_rewrite_uses(&intr->dest.ssa, nir_vec3(b, x, y, z));
+   nir_def_rewrite_uses(&intr->def, nir_vec3(b, x, y, z));
    return true;
 }
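sanitize_32bit_sysval in nir_lower_system_values.c above illustrates the other common shape: rather than emitting a new instruction, mutate the existing def in place and convert back afterwards. Condensed:

   const unsigned bit_size = intrin->def.bit_size;
   if (bit_size == 32)
      return NULL;

   /* Re-type the destination itself, then hand consumers a
    * conversion back to the bit size they expect. */
   intrin->def.bit_size = 32;
   return nir_u2uN(b, &intrin->def, bit_size);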
diff --git a/src/compiler/nir/nir_lower_tex.c b/src/compiler/nir/nir_lower_tex.c
index 85b6002..4d88fb2 100644
--- a/src/compiler/nir/nir_lower_tex.c
+++ b/src/compiler/nir/nir_lower_tex.c
@@ -288,7 +288,7 @@ lower_zero_lod(nir_builder *b, nir_tex_instr *tex)
    b->cursor = nir_before_instr(&tex->instr);
 
    if (tex->op == nir_texop_lod) {
-      nir_def_rewrite_uses(&tex->dest.ssa, nir_imm_int(b, 0));
+      nir_def_rewrite_uses(&tex->def, nir_imm_int(b, 0));
       nir_instr_remove(&tex->instr);
       return;
    }
@@ -315,23 +315,23 @@ sample_plane(nir_builder *b, nir_tex_instr *tex, int plane,
                                  nir_imm_int(b, plane));
    plane_tex->op = nir_texop_tex;
    plane_tex->sampler_dim = GLSL_SAMPLER_DIM_2D;
-   plane_tex->dest_type = nir_type_float | tex->dest.ssa.bit_size;
+   plane_tex->dest_type = nir_type_float | tex->def.bit_size;
    plane_tex->coord_components = 2;
 
    plane_tex->texture_index = tex->texture_index;
    plane_tex->sampler_index = tex->sampler_index;
 
-   nir_def_init(&plane_tex->instr, &plane_tex->dest.ssa, 4,
-                tex->dest.ssa.bit_size);
+   nir_def_init(&plane_tex->instr, &plane_tex->def, 4,
+                tex->def.bit_size);
 
    nir_builder_instr_insert(b, &plane_tex->instr);
 
    /* If scaling_factor is set, return a scaled value. */
    if (options->scale_factors[tex->texture_index])
-      return nir_fmul_imm(b, &plane_tex->dest.ssa,
+      return nir_fmul_imm(b, &plane_tex->def,
                           options->scale_factors[tex->texture_index]);
 
-   return &plane_tex->dest.ssa;
+   return &plane_tex->def;
 }
 
 static void
@@ -369,7 +369,7 @@ convert_yuv_to_rgb(nir_builder *b, nir_tex_instr *tex,
       }
    }
 
-   unsigned bit_size = tex->dest.ssa.bit_size;
+   unsigned bit_size = tex->def.bit_size;
 
    nir_def *offset =
       nir_vec4(b,
@@ -387,7 +387,7 @@ convert_yuv_to_rgb(nir_builder *b, nir_tex_instr *tex,
    nir_def *result =
       nir_ffma(b, y, m0, nir_ffma(b, u, m1, nir_ffma(b, v, m2, offset)));
 
-   nir_def_rewrite_uses(&tex->dest.ssa, result);
+   nir_def_rewrite_uses(&tex->def, result);
 }
 
 static void
@@ -886,11 +886,11 @@ lower_tex_to_txd(nir_builder *b, nir_tex_instr *tex)
    txd->src[tex->num_srcs] = nir_tex_src_for_ssa(nir_tex_src_ddx, dfdx);
    txd->src[tex->num_srcs + 1] = nir_tex_src_for_ssa(nir_tex_src_ddy, dfdy);
 
-   nir_def_init(&txd->instr, &txd->dest.ssa,
-                tex->dest.ssa.num_components,
-                tex->dest.ssa.bit_size);
+   nir_def_init(&txd->instr, &txd->def,
+                tex->def.num_components,
+                tex->def.bit_size);
    nir_builder_instr_insert(b, &txd->instr);
-   nir_def_rewrite_uses(&tex->dest.ssa, &txd->dest.ssa);
+   nir_def_rewrite_uses(&tex->def, &txd->def);
    nir_instr_remove(&tex->instr);
    return txd;
 }
@@ -926,11 +926,11 @@ lower_txb_to_txl(nir_builder *b, nir_tex_instr *tex)
    lod = nir_fadd(b, nir_channel(b, lod, 1), nir_ssa_for_src(b, tex->src[bias_idx].src, 1));
 
    txl->src[tex->num_srcs - 1] = nir_tex_src_for_ssa(nir_tex_src_lod, lod);
-   nir_def_init(&txl->instr, &txl->dest.ssa,
-                tex->dest.ssa.num_components,
-                tex->dest.ssa.bit_size);
+   nir_def_init(&txl->instr, &txl->def,
+                tex->def.num_components,
+                tex->def.bit_size);
    nir_builder_instr_insert(b, &txl->instr);
-   nir_def_rewrite_uses(&tex->dest.ssa, &txl->dest.ssa);
+   nir_def_rewrite_uses(&tex->def, &txl->def);
    nir_instr_remove(&tex->instr);
    return txl;
 }
@@ -1015,9 +1015,9 @@ swizzle_tg4_broadcom(nir_builder *b, nir_tex_instr *tex)
    assert(nir_tex_instr_dest_size(tex) == 4);
    unsigned swiz[4] = { 2, 3, 1, 0 };
-   nir_def *swizzled = nir_swizzle(b, &tex->dest.ssa, swiz, 4);
+   nir_def *swizzled = nir_swizzle(b, &tex->def, swiz, 4);
 
-   nir_def_rewrite_uses_after(&tex->dest.ssa, swizzled,
+   nir_def_rewrite_uses_after(&tex->def, swizzled,
                               swizzled->parent_instr);
 }
 
@@ -1041,12 +1041,12 @@ swizzle_result(nir_builder *b, nir_tex_instr *tex, const uint8_t swizzle[4])
        swizzle[2] < 4 && swizzle[3] < 4) {
       unsigned swiz[4] = { swizzle[0], swizzle[1], swizzle[2], swizzle[3] };
       /* We have no 0s or 1s, just emit a swizzling MOV */
-      swizzled = nir_swizzle(b, &tex->dest.ssa, swiz, 4);
+      swizzled = nir_swizzle(b, &tex->def, swiz, 4);
    } else {
       nir_scalar srcs[4];
       for (unsigned i = 0; i < 4; i++) {
          if (swizzle[i] < 4) {
-            srcs[i] = nir_get_ssa_scalar(&tex->dest.ssa, swizzle[i]);
+            srcs[i] = nir_get_ssa_scalar(&tex->def, swizzle[i]);
          } else {
            srcs[i] = nir_get_ssa_scalar(get_zero_or_one(b, tex->dest_type, swizzle[i]), 0);
          }
@@ -1055,7 +1055,7 @@ swizzle_result(nir_builder *b, nir_tex_instr *tex, const uint8_t swizzle[4])
       }
    }
 
-   nir_def_rewrite_uses_after(&tex->dest.ssa, swizzled,
+   nir_def_rewrite_uses_after(&tex->def, swizzled,
                               swizzled->parent_instr);
 }
 
@@ -1068,16 +1068,16 @@ linearize_srgb_result(nir_builder *b, nir_tex_instr *tex)
    b->cursor = nir_after_instr(&tex->instr);
 
    nir_def *rgb =
-      nir_format_srgb_to_linear(b, nir_trim_vector(b, &tex->dest.ssa, 3));
+      nir_format_srgb_to_linear(b, nir_trim_vector(b, &tex->def, 3));
 
    /* alpha is untouched: */
    nir_def *result = nir_vec4(b,
                               nir_channel(b, rgb, 0),
                               nir_channel(b, rgb, 1),
                               nir_channel(b, rgb, 2),
-                              nir_channel(b, &tex->dest.ssa, 3));
+                              nir_channel(b, &tex->def, 3));
 
-   nir_def_rewrite_uses_after(&tex->dest.ssa, result,
+   nir_def_rewrite_uses_after(&tex->def, result,
                               result->parent_instr);
 }
 
@@ -1094,7 +1094,7 @@ static bool
 lower_tex_packing(nir_builder *b, nir_tex_instr *tex,
                   const nir_lower_tex_options *options)
 {
-   nir_def *color = &tex->dest.ssa;
+   nir_def *color = &tex->def;
 
    b->cursor = nir_after_instr(&tex->instr);
 
@@ -1158,7 +1158,7 @@ lower_tex_packing(nir_builder *b, nir_tex_instr *tex,
       break;
    }
 
-   nir_def_rewrite_uses_after(&tex->dest.ssa, color,
+   nir_def_rewrite_uses_after(&tex->def, color,
                               color->parent_instr);
    return true;
 }
@@ -1219,14 +1219,14 @@ lower_tg4_offsets(nir_builder *b, nir_tex_instr *tex)
       nir_tex_src src = nir_tex_src_for_ssa(nir_tex_src_offset, offset);
       tex_copy->src[tex_copy->num_srcs - 1] = src;
 
-      nir_def_init(&tex_copy->instr, &tex_copy->dest.ssa,
+      nir_def_init(&tex_copy->instr, &tex_copy->def,
                    nir_tex_instr_dest_size(tex), 32);
 
       nir_builder_instr_insert(b, &tex_copy->instr);
 
-      dest[i] = nir_get_ssa_scalar(&tex_copy->dest.ssa, 3);
+      dest[i] = nir_get_ssa_scalar(&tex_copy->def, 3);
       if (tex->is_sparse) {
-         nir_def *code = nir_channel(b, &tex_copy->dest.ssa, 4);
+         nir_def *code = nir_channel(b, &tex_copy->def, 4);
          if (residency)
             residency = nir_sparse_residency_code_and(b, residency, code);
         else
@@ -1235,8 +1235,8 @@ lower_tg4_offsets(nir_builder *b, nir_tex_instr *tex)
    }
    dest[4] = nir_get_ssa_scalar(residency, 0);
 
-   nir_def *res = nir_vec_scalars(b, dest, tex->dest.ssa.num_components);
-   nir_def_rewrite_uses(&tex->dest.ssa, res);
+   nir_def *res = nir_vec_scalars(b, dest, tex->def.num_components);
+   nir_def_rewrite_uses(&tex->def, res);
    nir_instr_remove(&tex->instr);
 
    return true;
@@ -1265,8 +1265,8 @@ nir_lower_txs_lod(nir_builder *b, nir_tex_instr *tex)
     * which should return 0, not 1.
     */
    b->cursor = nir_after_instr(&tex->instr);
-   nir_def *minified = nir_imin(b, &tex->dest.ssa,
-                                nir_imax(b, nir_ushr(b, &tex->dest.ssa, lod),
+   nir_def *minified = nir_imin(b, &tex->def,
+                                nir_imax(b, nir_ushr(b, &tex->def, lod),
                                          nir_imm_int(b, 1)));
 
    /* Make sure the component encoding the array size (if any) is not
@@ -1279,11 +1279,11 @@ nir_lower_txs_lod(nir_builder *b, nir_tex_instr *tex)
       for (unsigned i = 0; i < dest_size - 1; i++)
          comp[i] = nir_channel(b, minified, i);
 
-      comp[dest_size - 1] = nir_channel(b, &tex->dest.ssa, dest_size - 1);
+      comp[dest_size - 1] = nir_channel(b, &tex->def, dest_size - 1);
       minified = nir_vec(b, comp, dest_size);
    }
 
-   nir_def_rewrite_uses_after(&tex->dest.ssa, minified,
+   nir_def_rewrite_uses_after(&tex->def, minified,
                               minified->parent_instr);
    return true;
 }
@@ -1296,14 +1296,14 @@ nir_lower_txs_cube_array(nir_builder *b, nir_tex_instr *tex)
 
    b->cursor = nir_after_instr(&tex->instr);
 
-   assert(tex->dest.ssa.num_components == 3);
-   nir_def *size = &tex->dest.ssa;
+   assert(tex->def.num_components == 3);
+   nir_def *size = &tex->def;
    size = nir_vec3(b, nir_channel(b, size, 1),
                    nir_channel(b, size, 1),
                    nir_idiv(b, nir_channel(b, size, 2),
                             nir_imm_int(b, 6)));
 
-   nir_def_rewrite_uses_after(&tex->dest.ssa, size, size->parent_instr);
+   nir_def_rewrite_uses_after(&tex->def, size, size->parent_instr);
 }
 
 /* Adjust the sample index according to AMD FMASK (fragment mask).
@@ -1341,7 +1341,7 @@ nir_lower_ms_txf_to_fragment_fetch(nir_builder *b, nir_tex_instr *tex)
    fmask_fetch->is_array = tex->is_array;
    fmask_fetch->texture_non_uniform = tex->texture_non_uniform;
    fmask_fetch->dest_type = nir_type_uint32;
-   nir_def_init(&fmask_fetch->instr, &fmask_fetch->dest.ssa, 1, 32);
+   nir_def_init(&fmask_fetch->instr, &fmask_fetch->def, 1, 32);
 
    fmask_fetch->num_srcs = 0;
    for (unsigned i = 0; i < tex->num_srcs; i++) {
@@ -1358,7 +1358,7 @@ nir_lower_ms_txf_to_fragment_fetch(nir_builder *b, nir_tex_instr *tex)
    int ms_index = nir_tex_instr_src_index(tex, nir_tex_src_ms_index);
    assert(ms_index >= 0);
    nir_src sample = tex->src[ms_index].src;
-   nir_def *new_sample = nir_ubfe(b, &fmask_fetch->dest.ssa,
+   nir_def *new_sample = nir_ubfe(b, &fmask_fetch->def,
                                   nir_ishl_imm(b, sample.ssa, 2), nir_imm_int(b, 3));
 
    /* Update instruction. */
@@ -1374,10 +1374,10 @@ nir_lower_samples_identical_to_fragment_fetch(nir_builder *b, nir_tex_instr *tex
    nir_tex_instr *fmask_fetch = nir_instr_as_tex(nir_instr_clone(b->shader, &tex->instr));
    fmask_fetch->op = nir_texop_fragment_mask_fetch_amd;
    fmask_fetch->dest_type = nir_type_uint32;
-   nir_def_init(&fmask_fetch->instr, &fmask_fetch->dest.ssa, 1, 32);
+   nir_def_init(&fmask_fetch->instr, &fmask_fetch->def, 1, 32);
 
    nir_builder_instr_insert(b, &fmask_fetch->instr);
-   nir_def_rewrite_uses(&tex->dest.ssa, nir_ieq_imm(b, &fmask_fetch->dest.ssa, 0));
+   nir_def_rewrite_uses(&tex->def, nir_ieq_imm(b, &fmask_fetch->def, 0));
    nir_instr_remove_v(&tex->instr);
 }
 
@@ -1405,12 +1405,12 @@ nir_lower_lod_zero_width(nir_builder *b, nir_tex_instr *tex)
 
    /* Replace the raw LOD by -FLT_MAX if the sum is 0 for all coordinates. */
    nir_def *adjusted_lod =
       nir_bcsel(b, is_zero, nir_imm_float(b, -FLT_MAX),
-                nir_channel(b, &tex->dest.ssa, 1));
+                nir_channel(b, &tex->def, 1));
 
    nir_def *def =
-      nir_vec2(b, nir_channel(b, &tex->dest.ssa, 0), adjusted_lod);
+      nir_vec2(b, nir_channel(b, &tex->def, 0), adjusted_lod);
 
-   nir_def_rewrite_uses_after(&tex->dest.ssa, def, def->parent_instr);
+   nir_def_rewrite_uses_after(&tex->def, def, def->parent_instr);
 }
 
 static bool
diff --git a/src/compiler/nir/nir_lower_tex_shadow.c b/src/compiler/nir/nir_lower_tex_shadow.c
index a47c8b1..882a5cc 100644
--- a/src/compiler/nir/nir_lower_tex_shadow.c
+++ b/src/compiler/nir/nir_lower_tex_shadow.c
@@ -97,9 +97,9 @@ nir_lower_tex_shadow_impl(nir_builder *b, nir_instr *instr, void *options)
    }
 
    /* NIR expects a vec4 result from the above texture instructions */
-   nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
+   nir_def_init(&tex->instr, &tex->def, 4, 32);
 
-   nir_def *tex_r = nir_channel(b, &tex->dest.ssa, 0);
+   nir_def *tex_r = nir_channel(b, &tex->def, 0);
    nir_def *cmp = tex->src[comp_index].src.ssa;
 
    int proj_index = nir_tex_instr_src_index(tex, nir_tex_src_projector);
diff --git a/src/compiler/nir/nir_lower_texcoord_replace.c b/src/compiler/nir/nir_lower_texcoord_replace.c
index 7b54bcb..e4f29ba 100644
--- a/src/compiler/nir/nir_lower_texcoord_replace.c
+++ b/src/compiler/nir/nir_lower_texcoord_replace.c
@@ -115,9 +115,9 @@ nir_lower_texcoord_replace_impl(nir_function_impl *impl,
       nir_def *cond = nir_test_mask(&b, mask, coord_replace);
       nir_def *result = nir_bcsel(&b, cond, new_coord,
-                                  &intrin->dest.ssa);
+                                  &intrin->def);
 
-      nir_def_rewrite_uses_after(&intrin->dest.ssa,
+      nir_def_rewrite_uses_after(&intrin->def,
                                  result, result->parent_instr);
    }
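The passes above that post-process a result (swizzle_result, linearize_srgb_result, the bcsel in nir_lower_texcoord_replace.c) use nir_def_rewrite_uses_after rather than nir_def_rewrite_uses: the fix-up code itself reads the original def, so rewriting every use would make the replacement consume itself. The shape of it, using the bcsel case:

   b->cursor = nir_after_instr(&intr->instr);

   /* result reads &intr->def as one of its operands. */
   nir_def *result = nir_bcsel(b, cond, new_coord, &intr->def);

   /* Rewrite only the uses after result, leaving result's own
    * operand pointing at the original def. */
   nir_def_rewrite_uses_after(&intr->def, result, result->parent_instr);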
diff --git a/src/compiler/nir/nir_lower_texcoord_replace_late.c b/src/compiler/nir/nir_lower_texcoord_replace_late.c
index c5d1ea1..b8730a0 100644
--- a/src/compiler/nir/nir_lower_texcoord_replace_late.c
+++ b/src/compiler/nir/nir_lower_texcoord_replace_late.c
@@ -65,7 +65,7 @@ pass(nir_builder *b, nir_instr *instr, void *data)
    sem.location = VARYING_SLOT_PNTC;
    nir_instr_rewrite_src_ssa(instr, offset, nir_imm_int(b, 0));
    nir_intrinsic_set_io_semantics(intr, sem);
-   nir_def *raw = &intr->dest.ssa;
+   nir_def *raw = &intr->def;
 
    b->cursor = nir_after_instr(instr);
    channels[0] = nir_channel_or_undef(b, raw, 0 - component);
@@ -73,7 +73,7 @@ pass(nir_builder *b, nir_instr *instr, void *data)
    }
 
    nir_def *res = nir_vec(b, &channels[component], intr->num_components);
-   nir_def_rewrite_uses_after(&intr->dest.ssa, res,
+   nir_def_rewrite_uses_after(&intr->def, res,
                               res->parent_instr);
    return true;
 }
diff --git a/src/compiler/nir/nir_lower_two_sided_color.c b/src/compiler/nir/nir_lower_two_sided_color.c
index cfd47bc..c54ce96 100644
--- a/src/compiler/nir/nir_lower_two_sided_color.c
+++ b/src/compiler/nir/nir_lower_two_sided_color.c
@@ -163,7 +163,7 @@ nir_lower_two_sided_color_instr(nir_builder *b, nir_instr *instr, void *data)
    }
 
    nir_def *color = nir_bcsel(b, face, front, back);
-   nir_def_rewrite_uses(&intr->dest.ssa, color);
+   nir_def_rewrite_uses(&intr->def, color);
 
    return true;
 }
diff --git a/src/compiler/nir/nir_lower_ubo_vec4.c b/src/compiler/nir/nir_lower_ubo_vec4.c
index 36330e8..c73c229 100644
--- a/src/compiler/nir/nir_lower_ubo_vec4.c
+++ b/src/compiler/nir/nir_lower_ubo_vec4.c
@@ -93,7 +93,7 @@ nir_lower_ubo_vec4_lower(nir_builder *b, nir_instr *instr, void *data)
    unsigned align_mul = nir_intrinsic_align_mul(intr);
    unsigned align_offset = nir_intrinsic_align_offset(intr);
 
-   int chan_size_bytes = intr->dest.ssa.bit_size / 8;
+   int chan_size_bytes = intr->def.bit_size / 8;
    int chans_per_vec4 = 16 / chan_size_bytes;
 
    /* We don't care if someone figured out that things are aligned beyond
@@ -110,12 +110,12 @@ nir_lower_ubo_vec4_lower(nir_builder *b, nir_instr *instr, void *data)
       num_components = chans_per_vec4;
 
    nir_intrinsic_instr *load = create_load(b, intr->src[0].ssa, vec4_offset,
-                                           intr->dest.ssa.bit_size,
+                                           intr->def.bit_size,
                                            num_components);
    nir_intrinsic_set_access(load, nir_intrinsic_access(intr));
 
-   nir_def *result = &load->dest.ssa;
+   nir_def *result = &load->def;
 
    int align_chan_offset = align_offset / chan_size_bytes;
    if (aligned_mul) {
@@ -151,7 +151,7 @@ nir_lower_ubo_vec4_lower(nir_builder *b, nir_instr *instr, void *data)
       */
      nir_def *next_vec4_offset = nir_iadd_imm(b, vec4_offset, 1);
      nir_intrinsic_instr *next_load = create_load(b, intr->src[0].ssa, next_vec4_offset,
-                                                  intr->dest.ssa.bit_size,
+                                                  intr->def.bit_size,
                                                   num_components);
 
      nir_def *channels[NIR_MAX_VEC_COMPONENTS];
@@ -170,8 +170,8 @@ nir_lower_ubo_vec4_lower(nir_builder *b, nir_instr *instr, void *data)
                                  nir_ieq(b, chan_vec4_offset, vec4_offset),
-                                 &load->dest.ssa,
-                                 &next_load->dest.ssa),
+                                 &load->def,
+                                 &next_load->def),
                        component);
    }
 
diff --git a/src/compiler/nir/nir_lower_uniforms_to_ubo.c b/src/compiler/nir/nir_lower_uniforms_to_ubo.c
index ba99c0e..7b17939 100644
--- a/src/compiler/nir/nir_lower_uniforms_to_ubo.c
+++ b/src/compiler/nir/nir_lower_uniforms_to_ubo.c
@@ -69,21 +69,21 @@ nir_lower_uniforms_to_ubo_instr(nir_builder *b, nir_instr *instr, void *data)
    nir_def *ubo_idx = nir_imm_int(b, 0);
    nir_def *uniform_offset = nir_ssa_for_src(b, intr->src[0], 1);
 
-   assert(intr->dest.ssa.bit_size >= 8);
+   assert(intr->def.bit_size >= 8);
    nir_def *load_result;
    if (state->load_vec4) {
       /* No asking us to generate load_vec4 when you've packed your uniforms
       * as dwords instead of vec4s.
       */
      assert(!state->dword_packed);
-      load_result = nir_load_ubo_vec4(b, intr->num_components, intr->dest.ssa.bit_size,
+      load_result = nir_load_ubo_vec4(b, intr->num_components, intr->def.bit_size,
                                      ubo_idx, uniform_offset, .base = nir_intrinsic_base(intr));
   } else {
      /* For PIPE_CAP_PACKED_UNIFORMS, the uniforms are packed with the
       * base/offset in dword units instead of vec4 units.
       */
      int multiplier = state->dword_packed ? 4 : 16;
-      load_result = nir_load_ubo(b, intr->num_components, intr->dest.ssa.bit_size,
+      load_result = nir_load_ubo(b, intr->num_components, intr->def.bit_size,
                                 ubo_idx,
                                 nir_iadd_imm(b, nir_imul_imm(b, uniform_offset, multiplier),
                                              nir_intrinsic_base(intr) * multiplier));
@@ -103,13 +103,13 @@ nir_lower_uniforms_to_ubo_instr(nir_builder *b, nir_instr *instr, void *data)
                                    nir_intrinsic_base(intr) * multiplier) %
                                    NIR_ALIGN_MUL_MAX);
      } else {
-         nir_intrinsic_set_align(load, MAX2(multiplier, intr->dest.ssa.bit_size / 8), 0);
+         nir_intrinsic_set_align(load, MAX2(multiplier, intr->def.bit_size / 8), 0);
      }
      nir_intrinsic_set_range_base(load, nir_intrinsic_base(intr) * multiplier);
      nir_intrinsic_set_range(load, nir_intrinsic_range(intr) * multiplier);
   }
 
-   nir_def_rewrite_uses(&intr->dest.ssa, load_result);
+   nir_def_rewrite_uses(&intr->def, load_result);
 
    nir_instr_remove(&intr->instr);
    return true;
diff --git a/src/compiler/nir/nir_lower_variable_initializers.c b/src/compiler/nir/nir_lower_variable_initializers.c
index 6417fff..ae37ece 100644
--- a/src/compiler/nir/nir_lower_variable_initializers.c
+++ b/src/compiler/nir/nir_lower_variable_initializers.c
@@ -76,7 +76,7 @@ lower_const_initializer(struct nir_builder *b, struct exec_list *var_list,
          nir_deref_instr *dst_deref = nir_build_deref_var(b, var);
 
          /* Note that this stores a pointer to src into dst */
-         nir_store_deref(b, dst_deref, &src_deref->dest.ssa, ~0);
+         nir_store_deref(b, dst_deref, &src_deref->def, ~0);
 
          progress = true;
 
          var->pointer_initializer = NULL;
diff --git a/src/compiler/nir/nir_lower_vars_to_ssa.c b/src/compiler/nir/nir_lower_vars_to_ssa.c
index f857f86..b2a0cd6 100644
--- a/src/compiler/nir/nir_lower_vars_to_ssa.c
+++ b/src/compiler/nir/nir_lower_vars_to_ssa.c
@@ -414,12 +414,12 @@ register_load_instr(nir_intrinsic_instr *load_instr,
    nir_undef_instr *undef =
       nir_undef_instr_create(state->shader,
                              load_instr->num_components,
-                             load_instr->dest.ssa.bit_size);
+                             load_instr->def.bit_size);
 
    nir_instr_insert_before(&load_instr->instr, &undef->instr);
    nir_instr_remove(&load_instr->instr);
 
-   nir_def_rewrite_uses(&load_instr->dest.ssa, &undef->def);
+   nir_def_rewrite_uses(&load_instr->def, &undef->def);
    return true;
 }
 
@@ -603,12 +603,12 @@ rename_variables(struct lower_variables_state *state)
                mov->src[0].swizzle[i] = 0;
 
             nir_def_init(&mov->instr, &mov->def,
-                         intrin->num_components, intrin->dest.ssa.bit_size);
+                         intrin->num_components, intrin->def.bit_size);
 
             nir_instr_insert_before(&intrin->instr, &mov->instr);
             nir_instr_remove(&intrin->instr);
 
-            nir_def_rewrite_uses(&intrin->dest.ssa,
+            nir_def_rewrite_uses(&intrin->def,
                                  &mov->def);
             break;
          }
diff --git a/src/compiler/nir/nir_lower_vec3_to_vec4.c b/src/compiler/nir/nir_lower_vec3_to_vec4.c
index eb5cfc3..0738475 100644
--- a/src/compiler/nir/nir_lower_vec3_to_vec4.c
+++ b/src/compiler/nir/nir_lower_vec3_to_vec4.c
@@ -55,11 +55,11 @@ lower_vec3_to_vec4_instr(nir_builder *b, nir_instr *instr, void *data)
             break;
 
          intrin->num_components = 4;
-         intrin->dest.ssa.num_components = 4;
+         intrin->def.num_components = 4;
 
         b->cursor = nir_after_instr(&intrin->instr);
-         nir_def *vec3 = nir_trim_vector(b, &intrin->dest.ssa, 3);
-         nir_def_rewrite_uses_after(&intrin->dest.ssa,
+         nir_def *vec3 = nir_trim_vector(b, &intrin->def, 3);
+         nir_def_rewrite_uses_after(&intrin->def,
                                     vec3, vec3->parent_instr);
 
          return true;
diff --git a/src/compiler/nir/nir_lower_wpos_center.c b/src/compiler/nir/nir_lower_wpos_center.c
index 1726425a..3245419 100644
--- a/src/compiler/nir/nir_lower_wpos_center.c
+++ b/src/compiler/nir/nir_lower_wpos_center.c
@@ -47,7 +47,7 @@ static void
 update_fragcoord(nir_builder *b, nir_intrinsic_instr *intr)
 {
-   nir_def *wpos = &intr->dest.ssa;
+   nir_def *wpos = &intr->def;
 
    b->cursor = nir_after_instr(&intr->instr);
 
@@ -60,7 +60,7 @@ update_fragcoord(nir_builder *b, nir_intrinsic_instr *intr)
                                 nir_imm_float(b, 0.0f),
                                 nir_imm_float(b, 0.0f)));
 
-   nir_def_rewrite_uses_after(&intr->dest.ssa, wpos,
+   nir_def_rewrite_uses_after(&intr->def, wpos,
                               wpos->parent_instr);
 }
 
diff --git a/src/compiler/nir/nir_lower_wpos_ytransform.c b/src/compiler/nir/nir_lower_wpos_ytransform.c
index 2de93dcf..39acccc 100644
--- a/src/compiler/nir/nir_lower_wpos_ytransform.c
+++ b/src/compiler/nir/nir_lower_wpos_ytransform.c
@@ -75,7 +75,7 @@ emit_wpos_adjustment(lower_wpos_ytransform_state *state,
    nir_builder *b = &state->b;
    nir_def *wpostrans, *wpos_temp, *wpos_temp_y, *wpos_input;
 
-   wpos_input = &intr->dest.ssa;
+   wpos_input = &intr->def;
 
    b->cursor = nir_after_instr(&intr->instr);
 
@@ -128,7 +128,7 @@ emit_wpos_adjustment(lower_wpos_ytransform_state *state,
                          nir_channel(b, wpos_temp, 2),
                          nir_channel(b, wpos_temp, 3));
 
-   nir_def_rewrite_uses_after(&intr->dest.ssa,
+   nir_def_rewrite_uses_after(&intr->def,
                               wpos_temp,
                               wpos_temp->parent_instr);
 }
@@ -274,7 +274,7 @@ lower_load_sample_pos(lower_wpos_ytransform_state *state,
    nir_builder *b = &state->b;
    b->cursor = nir_after_instr(&intr->instr);
 
-   nir_def *pos = &intr->dest.ssa;
+   nir_def *pos = &intr->def;
    nir_def *scale = nir_channel(b, get_transform(state), 0);
    nir_def *neg_scale = nir_channel(b, get_transform(state), 2);
    /* Either y or 1-y for scale equal to 1 or -1 respectively.
    */
@@ -283,7 +283,7 @@ lower_load_sample_pos(lower_wpos_ytransform_state *state,
                nir_fmul(b, nir_channel(b, pos, 1), scale));
    nir_def *flipped_pos = nir_vec2(b, nir_channel(b, pos, 0), flipped_y);
 
-   nir_def_rewrite_uses_after(&intr->dest.ssa, flipped_pos,
+   nir_def_rewrite_uses_after(&intr->def, flipped_pos,
                               flipped_pos->parent_instr);
 }
 
diff --git a/src/compiler/nir/nir_opt_combine_stores.c b/src/compiler/nir/nir_opt_combine_stores.c
index 1a05634..41059ca 100644
--- a/src/compiler/nir/nir_opt_combine_stores.c
+++ b/src/compiler/nir/nir_opt_combine_stores.c
@@ -156,7 +156,7 @@ combine_stores(struct combine_stores_state *state,
       if (store->num_components == 1) {
          store->num_components = num_components;
          nir_instr_rewrite_src(&store->instr, &store->src[0],
-                               nir_src_for_ssa(&combo->dst->dest.ssa));
+                               nir_src_for_ssa(&combo->dst->def));
       }
 
       assert(store->num_components == num_components);
diff --git a/src/compiler/nir/nir_opt_constant_folding.c b/src/compiler/nir/nir_opt_constant_folding.c
index 1bbfba5..7231678 100644
--- a/src/compiler/nir/nir_opt_constant_folding.c
+++ b/src/compiler/nir/nir_opt_constant_folding.c
@@ -208,9 +208,9 @@ try_fold_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
       nir_const_value *v = const_value_for_deref(deref);
       if (v) {
          b->cursor = nir_before_instr(&intrin->instr);
-         nir_def *val = nir_build_imm(b, intrin->dest.ssa.num_components,
-                                      intrin->dest.ssa.bit_size, v);
-         nir_def_rewrite_uses(&intrin->dest.ssa, val);
+         nir_def *val = nir_build_imm(b, intrin->def.num_components,
+                                      intrin->def.bit_size, v);
+         nir_def_rewrite_uses(&intrin->def, val);
          nir_instr_remove(&intrin->instr);
          return true;
       }
@@ -233,23 +233,23 @@ try_fold_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
        b->cursor = nir_before_instr(&intrin->instr);
        nir_def *val;
        if (offset >= range) {
-            val = nir_undef(b, intrin->dest.ssa.num_components,
-                            intrin->dest.ssa.bit_size);
+            val = nir_undef(b, intrin->def.num_components,
+                            intrin->def.bit_size);
        } else {
           nir_const_value imm[NIR_MAX_VEC_COMPONENTS];
           memset(imm, 0, sizeof(imm));
           uint8_t *data = (uint8_t *)b->shader->constant_data + base;
           for (unsigned i = 0; i < intrin->num_components; i++) {
-               unsigned bytes = intrin->dest.ssa.bit_size / 8;
+               unsigned bytes = intrin->def.bit_size / 8;
              bytes = MIN2(bytes, range - offset);
 
              memcpy(&imm[i].u64, data + offset, bytes);
              offset += bytes;
           }
-            val = nir_build_imm(b, intrin->dest.ssa.num_components,
-                                intrin->dest.ssa.bit_size, imm);
+            val = nir_build_imm(b, intrin->def.num_components,
+                                intrin->def.bit_size, imm);
        }
-         nir_def_rewrite_uses(&intrin->dest.ssa, val);
+         nir_def_rewrite_uses(&intrin->def, val);
        nir_instr_remove(&intrin->instr);
        return true;
     }
@@ -273,7 +273,7 @@ try_fold_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
       * the data is constant.
       */
      if (nir_src_is_const(intrin->src[0])) {
-         nir_def_rewrite_uses(&intrin->dest.ssa,
+         nir_def_rewrite_uses(&intrin->def,
                               intrin->src[0].ssa);
         nir_instr_remove(&intrin->instr);
         return true;
      }
@@ -284,7 +284,7 @@ try_fold_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
   case nir_intrinsic_vote_ieq:
      if (nir_src_is_const(intrin->src[0])) {
         b->cursor = nir_before_instr(&intrin->instr);
-         nir_def_rewrite_uses(&intrin->dest.ssa,
+         nir_def_rewrite_uses(&intrin->def,
                               nir_imm_true(b));
         nir_instr_remove(&intrin->instr);
         return true;
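try_fold_intrinsic above folds constant-backed loads by materializing an immediate with the same shape as the destination, which after this commit is read straight off the def. The core moves (v is the nir_const_value array for the load):

   b->cursor = nir_before_instr(&intrin->instr);
   nir_def *val = nir_build_imm(b, intrin->def.num_components,
                                intrin->def.bit_size, v);
   nir_def_rewrite_uses(&intrin->def, val);
   nir_instr_remove(&intrin->instr);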
diff --git a/src/compiler/nir/nir_opt_copy_prop_vars.c b/src/compiler/nir/nir_opt_copy_prop_vars.c
index c5e8d0c..c1d5253 100644
--- a/src/compiler/nir/nir_opt_copy_prop_vars.c
+++ b/src/compiler/nir/nir_opt_copy_prop_vars.c
@@ -727,7 +727,7 @@ load_from_ssa_entry_value(struct copy_prop_var_state *state,
 
    if (available != (1 << num_components) - 1 &&
        intrin->intrinsic == nir_intrinsic_load_deref &&
-       (available & nir_def_components_read(&intrin->dest.ssa)) == 0) {
+       (available & nir_def_components_read(&intrin->def)) == 0) {
       /* If none of the components read are available as SSA values, then we
        * should just bail.  Otherwise, we would end up replacing the uses of
        * the load_deref a vecN() that just gathers up its components.
@@ -738,7 +738,7 @@ load_from_ssa_entry_value(struct copy_prop_var_state *state,
    b->cursor = nir_after_instr(&intrin->instr);
 
    nir_def *load_def =
-      intrin->intrinsic == nir_intrinsic_load_deref ? &intrin->dest.ssa : NULL;
+      intrin->intrinsic == nir_intrinsic_load_deref ? &intrin->def : NULL;
 
    bool keep_intrin = false;
    nir_scalar comps[NIR_MAX_VEC_COMPONENTS];
@@ -1072,8 +1072,8 @@ copy_prop_vars_block(struct copy_prop_var_state *state,
          /* Loading from an invalid index yields an undef */
          if (vec_index >= vec_comps) {
            b->cursor = nir_instr_remove(instr);
-            nir_def *u = nir_undef(b, 1, intrin->dest.ssa.bit_size);
-            nir_def_rewrite_uses(&intrin->dest.ssa, u);
+            nir_def *u = nir_undef(b, 1, intrin->def.bit_size);
+            nir_def_rewrite_uses(&intrin->def, u);
            state->progress = true;
            break;
         }
@@ -1097,25 +1097,25 @@ copy_prop_vars_block(struct copy_prop_var_state *state,
              * We need to be careful when rewriting uses so we don't
              * rewrite the vecN itself.
              */
-            nir_def_rewrite_uses_after(&intrin->dest.ssa,
+            nir_def_rewrite_uses_after(&intrin->def,
                                        value.ssa.def[0],
                                        value.ssa.def[0]->parent_instr);
         } else {
-            nir_def_rewrite_uses(&intrin->dest.ssa,
+            nir_def_rewrite_uses(&intrin->def,
                                  value.ssa.def[0]);
         }
      } else {
         /* We're turning it into a load of a different variable */
-         intrin->src[0] = nir_src_for_ssa(&value.deref.instr->dest.ssa);
+         intrin->src[0] = nir_src_for_ssa(&value.deref.instr->def);
 
         /* Put it back in again. */
         nir_builder_instr_insert(b, instr);
-         value_set_ssa_components(&value, &intrin->dest.ssa,
+         value_set_ssa_components(&value, &intrin->def,
                                   intrin->num_components);
      }
      state->progress = true;
   } else {
-      value_set_ssa_components(&value, &intrin->dest.ssa,
+      value_set_ssa_components(&value, &intrin->def,
                                intrin->num_components);
   }
 
@@ -1248,7 +1248,7 @@ copy_prop_vars_block(struct copy_prop_var_state *state,
        continue;
 
      /* Just turn it into a copy of a different deref */
-      intrin->src[1] = nir_src_for_ssa(&value.deref.instr->dest.ssa);
+      intrin->src[1] = nir_src_for_ssa(&value.deref.instr->def);
 
      /* Put it back in again. */
      nir_builder_instr_insert(b, instr);
diff --git a/src/compiler/nir/nir_opt_dce.c b/src/compiler/nir/nir_opt_dce.c
index dc9b6ef..80d9ce3 100644
--- a/src/compiler/nir/nir_opt_dce.c
+++ b/src/compiler/nir/nir_opt_dce.c
@@ -64,21 +64,21 @@ is_live(BITSET_WORD *defs_live, nir_instr *instr)
    }
    case nir_instr_type_deref: {
       nir_deref_instr *deref = nir_instr_as_deref(instr);
-      return is_def_live(&deref->dest.ssa, defs_live);
+      return is_def_live(&deref->def, defs_live);
    }
    case nir_instr_type_intrinsic: {
       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
       const nir_intrinsic_info *info = &nir_intrinsic_infos[intrin->intrinsic];
       return !(info->flags & NIR_INTRINSIC_CAN_ELIMINATE) ||
-             (info->has_dest && is_def_live(&intrin->dest.ssa, defs_live));
+             (info->has_dest && is_def_live(&intrin->def, defs_live));
    }
    case nir_instr_type_tex: {
       nir_tex_instr *tex = nir_instr_as_tex(instr);
-      return is_def_live(&tex->dest.ssa, defs_live);
+      return is_def_live(&tex->def, defs_live);
    }
    case nir_instr_type_phi: {
       nir_phi_instr *phi = nir_instr_as_phi(instr);
-      return is_def_live(&phi->dest.ssa, defs_live);
+      return is_def_live(&phi->def, defs_live);
    }
    case nir_instr_type_load_const: {
       nir_load_const_instr *lc = nir_instr_as_load_const(instr);
@@ -91,7 +91,7 @@ is_live(BITSET_WORD *defs_live, nir_instr *instr)
    case nir_instr_type_parallel_copy: {
       nir_parallel_copy_instr *pc = nir_instr_as_parallel_copy(instr);
       nir_foreach_parallel_copy_entry(entry, pc) {
-         if (entry->dest_is_reg || is_def_live(&entry->dest.dest.ssa, defs_live))
+         if (entry->dest_is_reg || is_def_live(&entry->dest.def, defs_live))
            return true;
       }
       return false;
diff --git a/src/compiler/nir/nir_opt_dead_cf.c b/src/compiler/nir/nir_opt_dead_cf.c
index 7a1a0c4..352c6f8 100644
--- a/src/compiler/nir/nir_opt_dead_cf.c
+++ b/src/compiler/nir/nir_opt_dead_cf.c
@@ -109,7 +109,7 @@ opt_constant_if(nir_if *if_stmt, bool condition)
       }
 
       assert(def);
-      nir_def_rewrite_uses(&phi->dest.ssa, def);
+      nir_def_rewrite_uses(&phi->def, def);
       nir_instr_remove(&phi->instr);
    }
 }
diff --git a/src/compiler/nir/nir_opt_if.c b/src/compiler/nir/nir_opt_if.c
index e005db2..58df68f 100644
--- a/src/compiler/nir/nir_opt_if.c
+++ b/src/compiler/nir/nir_opt_if.c
@@ -499,7 +499,7 @@ opt_split_alu_of_phi(nir_builder *b, nir_loop *loop)
    nir_phi_instr_add_src(phi, prev_block, nir_src_for_ssa(prev_value));
    nir_phi_instr_add_src(phi, continue_block, nir_src_for_ssa(alu_copy));
 
-   nir_def_init(&phi->instr, &phi->dest.ssa, alu_copy->num_components,
+   nir_def_init(&phi->instr, &phi->def, alu_copy->num_components,
                 alu_copy->bit_size);
 
    b->cursor = nir_after_phis(header_block);
@@ -509,7 +509,7 @@ opt_split_alu_of_phi(nir_builder *b, nir_loop *loop)
     * result of the phi.
     */
    nir_def_rewrite_uses(&alu->def,
-                        &phi->dest.ssa);
+                        &phi->def);
 
    /* Since the original ALU instruction no longer has any readers, just
    * remove it.
@@ -663,7 +663,7 @@ opt_simplify_bcsel_of_phi(nir_builder *b, nir_loop *loop)
                                      continue_block)
                             ->src);
 
-   nir_def_init(&phi->instr, &phi->dest.ssa,
+   nir_def_init(&phi->instr, &phi->def,
                 bcsel->def.num_components,
                 bcsel->def.bit_size);
 
@@ -674,7 +674,7 @@ opt_simplify_bcsel_of_phi(nir_builder *b, nir_loop *loop)
     * the phi.
     */
    nir_def_rewrite_uses(&bcsel->def,
-                        &phi->dest.ssa);
+                        &phi->def);
 
    /* Since the original bcsel instruction no longer has any readers,
    * just remove it.
@@ -919,8 +919,8 @@ opt_if_phi_is_condition(nir_builder *b, nir_if *nif)
    nir_block *after_if_block = nir_cf_node_as_block(nir_cf_node_next(&nif->cf_node));
    nir_foreach_phi_safe(phi, after_if_block) {
-      if (phi->dest.ssa.bit_size != cond->bit_size ||
-          phi->dest.ssa.num_components != 1)
+      if (phi->def.bit_size != cond->bit_size ||
+          phi->def.num_components != 1)
         continue;
 
      enum opt_bool {
@@ -946,11 +946,11 @@ opt_if_phi_is_condition(nir_builder *b, nir_if *nif)
         break;
      }
 
      if (then_val == T && else_val == F) {
-         nir_def_rewrite_uses(&phi->dest.ssa, cond);
+         nir_def_rewrite_uses(&phi->def, cond);
         progress = true;
      } else if (then_val == F && else_val == T) {
         b->cursor = nir_before_cf_node(&nif->cf_node);
-         nir_def_rewrite_uses(&phi->dest.ssa, nir_inot(b, cond));
+         nir_def_rewrite_uses(&phi->def, nir_inot(b, cond));
         progress = true;
      }
   }
diff --git a/src/compiler/nir/nir_opt_intrinsics.c b/src/compiler/nir/nir_opt_intrinsics.c
index a07fcee..95644ff 100644
--- a/src/compiler/nir/nir_opt_intrinsics.c
+++ b/src/compiler/nir/nir_opt_intrinsics.c
@@ -39,10 +39,10 @@ src_is_single_use_shuffle(nir_src src, nir_def **data, nir_def **index)
    * uses is reasonable.  If we ever want to use this from an if statement,
    * we can change it then.
    */
-   if (!list_is_singular(&shuffle->dest.ssa.uses))
+   if (!list_is_singular(&shuffle->def.uses))
      return false;
 
-   if (nir_def_used_by_if(&shuffle->dest.ssa))
+   if (nir_def_used_by_if(&shuffle->def))
      return false;
 
   *data = shuffle->src[0].ssa;
@@ -243,10 +243,10 @@ opt_intrinsics_alu(nir_builder *b, nir_alu_instr *alu,
 static bool
 try_opt_exclusive_scan_to_inclusive(nir_intrinsic_instr *intrin)
 {
-   if (intrin->dest.ssa.num_components != 1)
+   if (intrin->def.num_components != 1)
      return false;
 
-   nir_foreach_use_including_if(src, &intrin->dest.ssa) {
+   nir_foreach_use_including_if(src, &intrin->def) {
      if (src->is_if || src->parent_instr->type != nir_instr_type_alu)
         return false;
 
@@ -279,10 +279,10 @@ try_opt_exclusive_scan_to_inclusive(nir_intrinsic_instr *intrin)
    /* Convert to inclusive scan. */
    intrin->intrinsic = nir_intrinsic_inclusive_scan;
 
-   nir_foreach_use_including_if_safe(src, &intrin->dest.ssa) {
+   nir_foreach_use_including_if_safe(src, &intrin->def) {
      /* Remove alu. */
      nir_alu_instr *alu = nir_instr_as_alu(src->parent_instr);
-      nir_def_rewrite_uses(&alu->def, &intrin->dest.ssa);
+      nir_def_rewrite_uses(&alu->def, &intrin->def);
      nir_instr_remove(&alu->instr);
   }
 
@@ -303,7 +303,7 @@ opt_intrinsics_intrin(nir_builder *b, nir_intrinsic_instr *intrin,
      return false;
 
   bool progress = false;
-   nir_foreach_use_safe(use_src, &intrin->dest.ssa) {
+   nir_foreach_use_safe(use_src, &intrin->def) {
      if (use_src->parent_instr->type == nir_instr_type_alu) {
         nir_alu_instr *alu = nir_instr_as_alu(use_src->parent_instr);
diff --git a/src/compiler/nir/nir_opt_large_constants.c b/src/compiler/nir/nir_opt_large_constants.c
index 6041608..d932fd2 100644
--- a/src/compiler/nir/nir_opt_large_constants.c
+++ b/src/compiler/nir/nir_opt_large_constants.c
@@ -361,7 +361,7 @@ nir_opt_large_constants(nir_shader *shader,
          if (info->is_constant) {
            b.cursor = nir_after_instr(&intrin->instr);
            nir_def *val = build_constant_load(&b, deref, size_align);
-            nir_def_rewrite_uses(&intrin->dest.ssa,
+            nir_def_rewrite_uses(&intrin->def,
                                 val);
            nir_instr_remove(&intrin->instr);
            nir_deref_instr_remove_if_unused(deref);
diff --git a/src/compiler/nir/nir_opt_load_store_vectorize.c b/src/compiler/nir/nir_opt_load_store_vectorize.c
index 2f58187..be1a0dd 100644
--- a/src/compiler/nir/nir_opt_load_store_vectorize.c
+++ b/src/compiler/nir/nir_opt_load_store_vectorize.c
@@ -231,7 +231,7 @@ sort_entries(const void *a_, const void *b_)
 static unsigned
 get_bit_size(struct entry *entry)
 {
-   unsigned size = entry->is_store ? entry->intrin->src[entry->info->value_src].ssa->bit_size : entry->intrin->dest.ssa.bit_size;
+   unsigned size = entry->is_store ? entry->intrin->src[entry->info->value_src].ssa->bit_size : entry->intrin->def.bit_size;
    return size == 1 ? 32u : size;
 }
 
@@ -593,7 +593,7 @@ cast_deref(nir_builder *b, unsigned num_components, unsigned bit_size, nir_deref
    if (deref->type == type)
      return deref;
 
-   return nir_build_deref_cast(b, &deref->dest.ssa, deref->modes, type, 0);
+   return nir_build_deref_cast(b, &deref->def, deref->modes, type, 0);
 }
 
 /* Return true if "new_bit_size" is a usable bit size for a vectorized load/store
@@ -656,7 +656,7 @@ subtract_deref(nir_builder *b, nir_deref_instr *deref, int64_t offset)
        offset % nir_deref_instr_array_stride(deref) == 0) {
      unsigned stride = nir_deref_instr_array_stride(deref);
      nir_def *index = nir_imm_intN_t(b, nir_src_as_int(deref->arr.index) - offset / stride,
-                                     deref->dest.ssa.bit_size);
+                                     deref->def.bit_size);
      return nir_build_deref_ptr_as_array(b, nir_deref_instr_parent(deref), index);
   }
 
@@ -669,10 +669,10 @@ subtract_deref(nir_builder *b, nir_deref_instr *deref, int64_t offset)
         b, parent, nir_src_as_int(deref->arr.index) - offset / stride);
   }
 
-   deref = nir_build_deref_cast(b, &deref->dest.ssa, deref->modes,
+   deref = nir_build_deref_cast(b, &deref->def, deref->modes,
                                glsl_scalar_type(GLSL_TYPE_UINT8), 1);
   return nir_build_deref_ptr_as_array(
-      b, deref, nir_imm_intN_t(b, -offset, deref->dest.ssa.bit_size));
+      b, deref, nir_imm_intN_t(b, -offset, deref->def.bit_size));
 }
 
 static void
@@ -684,9 +684,9 @@ vectorize_loads(nir_builder *b, struct vectorize_ctx *ctx,
 {
    unsigned low_bit_size = get_bit_size(low);
    unsigned high_bit_size = get_bit_size(high);
-   bool low_bool = low->intrin->dest.ssa.bit_size == 1;
-   bool high_bool = high->intrin->dest.ssa.bit_size == 1;
-   nir_def *data = &first->intrin->dest.ssa;
+   bool low_bool = low->intrin->def.bit_size == 1;
+   bool high_bool = high->intrin->def.bit_size == 1;
+   nir_def *data = &first->intrin->def;
   b->cursor = nir_after_instr(first->instr);
@@ -705,12 +705,12 @@ vectorize_loads(nir_builder *b, struct vectorize_ctx *ctx,
 
    /* update uses */
    if (first == low) {
-      nir_def_rewrite_uses_after(&low->intrin->dest.ssa, low_def,
+      nir_def_rewrite_uses_after(&low->intrin->def, low_def,
                                  high_def->parent_instr);
-      nir_def_rewrite_uses(&high->intrin->dest.ssa, high_def);
+      nir_def_rewrite_uses(&high->intrin->def, high_def);
    } else {
-      nir_def_rewrite_uses(&low->intrin->dest.ssa, low_def);
-      nir_def_rewrite_uses_after(&high->intrin->dest.ssa, high_def,
+      nir_def_rewrite_uses(&low->intrin->def, low_def);
+      nir_def_rewrite_uses_after(&high->intrin->def, high_def,
                                  high_def->parent_instr);
    }
 
@@ -743,7 +743,7 @@ vectorize_loads(nir_builder *b, struct vectorize_ctx *ctx,
      first->deref = cast_deref(b, new_num_components, new_bit_size, deref);
      nir_instr_rewrite_src(first->instr, &first->intrin->src[info->deref_src],
-                           nir_src_for_ssa(&first->deref->dest.ssa));
+                           nir_src_for_ssa(&first->deref->def));
   }
 
   /* update align */
@@ -836,7 +836,7 @@ vectorize_stores(nir_builder *b, struct vectorize_ctx *ctx,
      second->deref = cast_deref(b, new_num_components, new_bit_size,
                                 nir_src_as_deref(low->intrin->src[info->deref_src]));
      nir_instr_rewrite_src(second->instr, &second->intrin->src[info->deref_src],
-                           nir_src_for_ssa(&second->deref->dest.ssa));
+                           nir_src_for_ssa(&second->deref->def));
   }
 
   /* update base/align */
@@ -1197,9 +1197,9 @@ try_vectorize_shared2(struct vectorize_ctx *ctx,
   } else {
      nir_def *new_def = nir_load_shared2_amd(&b, low_size * 8u, offset, .offset1 = diff / stride,
                                              .st64 = st64);
-      nir_def_rewrite_uses(&low->intrin->dest.ssa,
+      nir_def_rewrite_uses(&low->intrin->def,
                           nir_bitcast_vector(&b, nir_channel(&b, new_def, 0), low_bit_size));
-      nir_def_rewrite_uses(&high->intrin->dest.ssa,
+      nir_def_rewrite_uses(&high->intrin->def,
                           nir_bitcast_vector(&b, nir_channel(&b, new_def, 1), high_bit_size));
   }
 
diff --git a/src/compiler/nir/nir_opt_loop_unroll.c b/src/compiler/nir/nir_opt_loop_unroll.c
index 6db8305..2217750 100644
--- a/src/compiler/nir/nir_opt_loop_unroll.c
+++ b/src/compiler/nir/nir_opt_loop_unroll.c
@@ -678,9 +678,9 @@ remove_out_of_bounds_induction_use(nir_shader *shader, nir_loop *loop,
                            trip_count)) {
            if (intrin->intrinsic == nir_intrinsic_load_deref) {
               nir_def *undef =
-                  nir_undef(&b, intrin->dest.ssa.num_components,
-                            intrin->dest.ssa.bit_size);
-               nir_def_rewrite_uses(&intrin->dest.ssa,
+                  nir_undef(&b, intrin->def.num_components,
+                            intrin->def.bit_size);
+               nir_def_rewrite_uses(&intrin->def,
                                    undef);
            } else {
               nir_instr_remove(instr);
diff --git a/src/compiler/nir/nir_opt_memcpy.c b/src/compiler/nir/nir_opt_memcpy.c
index aa73fa4..b7b3ae4 100644
--- a/src/compiler/nir/nir_opt_memcpy.c
+++ b/src/compiler/nir/nir_opt_memcpy.c
@@ -47,7 +47,7 @@ opt_memcpy_deref_cast(nir_intrinsic_instr *cpy, nir_src *deref_src)
    if (cast->type == glsl_int8_t_type() ||
        cast->type == glsl_uint8_t_type()) {
      nir_instr_rewrite_src(&cpy->instr, deref_src,
-                           nir_src_for_ssa(&parent->dest.ssa));
+                           nir_src_for_ssa(&parent->def));
      return true;
   }
 
@@ -65,7 +65,7 @@ opt_memcpy_deref_cast(nir_intrinsic_instr *cpy, nir_src *deref_src)
      return false;
 
   nir_instr_rewrite_src(&cpy->instr, deref_src,
-                        nir_src_for_ssa(&parent->dest.ssa));
+                        nir_src_for_ssa(&parent->def));
 
   return true;
 }
@@ -185,7 +185,7 @@ try_lower_memcpy(nir_builder *b, nir_intrinsic_instr *cpy,
        type_is_tightly_packed(dst->type, &type_size) &&
        type_size == size) {
      b->cursor = nir_instr_remove(&cpy->instr);
-      src = nir_build_deref_cast(b, &src->dest.ssa,
+      src = nir_build_deref_cast(b, &src->def,
                                 src->modes, dst->type, 0);
      nir_copy_deref_with_access(b, dst, src,
                                 nir_intrinsic_dst_access(cpy),
@@ -205,7 +205,7 @@ try_lower_memcpy(nir_builder *b, nir_intrinsic_instr *cpy,
              _mesa_set_search(complex_vars, dst->var) == NULL &&
              glsl_get_explicit_size(dst->type, false) <= size) {
      b->cursor = nir_instr_remove(&cpy->instr);
-      src = nir_build_deref_cast(b, &src->dest.ssa,
+      src = nir_build_deref_cast(b, &src->def,
                                 src->modes, dst->type, 0);
      nir_copy_deref_with_access(b, dst, src,
                                 nir_intrinsic_dst_access(cpy),
@@ -217,7 +217,7 @@ try_lower_memcpy(nir_builder *b, nir_intrinsic_instr *cpy,
              type_is_tightly_packed(src->type, &type_size) &&
              type_size == size) {
      b->cursor = nir_instr_remove(&cpy->instr);
-      dst = nir_build_deref_cast(b, &dst->dest.ssa,
+      dst = nir_build_deref_cast(b, &dst->def,
                                 dst->modes, src->type, 0);
      nir_copy_deref_with_access(b, dst, src,
                                 nir_intrinsic_dst_access(cpy),
diff --git a/src/compiler/nir/nir_opt_offsets.c b/src/compiler/nir/nir_opt_offsets.c
index 1feb2fd..58abb2c 100644
--- a/src/compiler/nir/nir_opt_offsets.c
+++ b/src/compiler/nir/nir_opt_offsets.c
@@ -147,7 +147,7 @@ try_fold_shared2(nir_builder *b,
                 opt_offsets_state *state,
                 unsigned offset_src_idx)
 {
-   unsigned comp_size = (intrin->intrinsic == nir_intrinsic_load_shared2_amd ? intrin->dest.ssa.bit_size : intrin->src[0].ssa->bit_size) / 8;
+   unsigned comp_size = (intrin->intrinsic == nir_intrinsic_load_shared2_amd ? intrin->def.bit_size : intrin->src[0].ssa->bit_size) / 8;
    unsigned stride = (nir_intrinsic_st64(intrin) ? 64 : 1) * comp_size;
    unsigned offset0 = nir_intrinsic_offset0(intrin) * stride;
    unsigned offset1 = nir_intrinsic_offset1(intrin) * stride;
diff --git a/src/compiler/nir/nir_opt_peephole_select.c b/src/compiler/nir/nir_opt_peephole_select.c
index 344d90c..cc6f04d 100644
--- a/src/compiler/nir/nir_opt_peephole_select.c
+++ b/src/compiler/nir/nir_opt_peephole_select.c
@@ -306,7 +306,7 @@ nir_opt_collapse_if(nir_if *if_stmt, nir_shader *shader, unsigned limit,
        nir_phi_src *else_src =
          nir_phi_get_src_from_block(phi, nir_if_first_else_block(if_stmt));
 
-         nir_foreach_use(src, &phi->dest.ssa) {
+         nir_foreach_use(src, &phi->def) {
          assert(src->parent_instr->type == nir_instr_type_phi);
          nir_phi_src *phi_src =
            nir_phi_get_src_from_block(nir_instr_as_phi(src->parent_instr),
@@ -337,13 +337,13 @@ nir_opt_collapse_if(nir_if *if_stmt, nir_shader *shader, unsigned limit,
        nir_phi_instr *phi = nir_instr_as_phi(instr);
        nir_phi_src *else_src =
          nir_phi_get_src_from_block(phi, nir_if_first_else_block(if_stmt));
 
-        nir_foreach_use_safe(src, &phi->dest.ssa) {
+        nir_foreach_use_safe(src, &phi->def) {
          nir_phi_src *phi_src =
            nir_phi_get_src_from_block(nir_instr_as_phi(src->parent_instr),
                                       nir_if_first_else_block(parent_if));
          if (phi_src->src.ssa == else_src->src.ssa)
             nir_instr_rewrite_src(src->parent_instr, &phi_src->src,
-                                   nir_src_for_ssa(&phi->dest.ssa));
+                                   nir_src_for_ssa(&phi->def));
        }
     }
 
@@ -458,9 +458,9 @@ nir_opt_peephole_select_block(nir_block *block, nir_shader *shader,
        }
 
        nir_def_init(&sel->instr, &sel->def,
-                    phi->dest.ssa.num_components, phi->dest.ssa.bit_size);
+                    phi->def.num_components, phi->def.bit_size);
 
-        nir_def_rewrite_uses(&phi->dest.ssa,
+        nir_def_rewrite_uses(&phi->def,
                             &sel->def);
 
        nir_instr_insert_before(&phi->instr, &sel->instr);
diff --git a/src/compiler/nir/nir_opt_phi_precision.c b/src/compiler/nir/nir_opt_phi_precision.c
index c0d53bb..b8e4666 100644
--- a/src/compiler/nir/nir_opt_phi_precision.c
+++ b/src/compiler/nir/nir_opt_phi_precision.c
try_move_narrowing_dst(nir_builder *b, nir_phi_instr *phi) nir_op op = INVALID_OP; /* If the phi has already been narrowed, nothing more to do: */ - if (phi->dest.ssa.bit_size != 32) + if (phi->def.bit_size != 32) return false; /* Are the only uses of the phi conversion instructions, and * are they all the same conversion? */ - nir_foreach_use_including_if(use, &phi->dest.ssa) { + nir_foreach_use_including_if(use, &phi->def) { /* an if use means the phi is used directly in a conditional, ie. * without a conversion */ @@ -230,8 +230,8 @@ try_move_narrowing_dst(nir_builder *b, nir_phi_instr *phi) /* construct replacement phi instruction: */ nir_phi_instr *new_phi = nir_phi_instr_create(b->shader); - nir_def_init(&new_phi->instr, &new_phi->dest.ssa, - phi->dest.ssa.num_components, + nir_def_init(&new_phi->instr, &new_phi->def, + phi->def.num_components, nir_alu_type_get_type_size(nir_op_infos[op].output_type)); /* Push the conversion into the new phi sources: */ @@ -249,14 +249,14 @@ try_move_narrowing_dst(nir_builder *b, nir_phi_instr *phi) * directly use the new phi, skipping the conversion out of the orig * phi */ - nir_foreach_use(use, &phi->dest.ssa) { + nir_foreach_use(use, &phi->def) { /* We've previously established that all the uses were alu * conversion ops. Turn them into movs instead. */ nir_alu_instr *alu = nir_instr_as_alu(use->parent_instr); alu->op = nir_op_mov; } - nir_def_rewrite_uses(&phi->dest.ssa, &new_phi->dest.ssa); + nir_def_rewrite_uses(&phi->def, &new_phi->def); /* And finally insert the new phi after all sources are in place: */ b->cursor = nir_after_instr(&phi->instr); @@ -363,7 +363,7 @@ static bool try_move_widening_src(nir_builder *b, nir_phi_instr *phi) { /* If the phi has already been narrowed, nothing more to do: */ - if (phi->dest.ssa.bit_size != 32) + if (phi->def.bit_size != 32) return false; unsigned bit_size; @@ -374,8 +374,8 @@ try_move_widening_src(nir_builder *b, nir_phi_instr *phi) /* construct replacement phi instruction: */ nir_phi_instr *new_phi = nir_phi_instr_create(b->shader); - nir_def_init(&new_phi->instr, &new_phi->dest.ssa, - phi->dest.ssa.num_components, bit_size); + nir_def_init(&new_phi->instr, &new_phi->def, + phi->def.num_components, bit_size); /* Remove the widening conversions from the phi sources: */ nir_foreach_phi_src(src, phi) { @@ -419,9 +419,9 @@ try_move_widening_src(nir_builder *b, nir_phi_instr *phi) * and re-write the original phi's uses */ b->cursor = nir_after_instr_and_phis(&new_phi->instr); - nir_def *def = nir_build_alu(b, op, &new_phi->dest.ssa, NULL, NULL, NULL); + nir_def *def = nir_build_alu(b, op, &new_phi->def, NULL, NULL, NULL); - nir_def_rewrite_uses(&phi->dest.ssa, def); + nir_def_rewrite_uses(&phi->def, def); return true; } diff --git a/src/compiler/nir/nir_opt_ray_queries.c b/src/compiler/nir/nir_opt_ray_queries.c index b21ecd6..e9d2e7d 100644 --- a/src/compiler/nir/nir_opt_ray_queries.c +++ b/src/compiler/nir/nir_opt_ray_queries.c @@ -72,7 +72,7 @@ nir_find_ray_queries_read(struct set *queries, nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr); switch (intrin->intrinsic) { case nir_intrinsic_rq_proceed: - if (!list_is_empty(&intrin->dest.ssa.uses)) + if (!list_is_empty(&intrin->def.uses)) mark_query_read(queries, intrin); break; case nir_intrinsic_rq_load: @@ -115,7 +115,7 @@ nir_replace_unread_queries_instr(nir_builder *b, nir_instr *instr, void *data) return false; if (intrin->intrinsic == nir_intrinsic_rq_load) - assert(list_is_empty(&intrin->dest.ssa.uses)); + 
assert(list_is_empty(&intrin->def.uses)); nir_instr_remove(instr); diff --git a/src/compiler/nir/nir_opt_remove_phis.c b/src/compiler/nir/nir_opt_remove_phis.c index c7406a2..54b538f 100644 --- a/src/compiler/nir/nir_opt_remove_phis.c +++ b/src/compiler/nir/nir_opt_remove_phis.c @@ -85,7 +85,7 @@ remove_phis_block(nir_block *block, nir_builder *b) * still dominate the phi node, and the phi will still always take * the value of that definition. */ - if (src->src.ssa == &phi->dest.ssa) + if (src->src.ssa == &phi->def) continue; if (def == NULL) { @@ -109,8 +109,8 @@ remove_phis_block(nir_block *block, nir_builder *b) /* In this case, the phi had no sources. So turn it into an undef. */ b->cursor = nir_after_phis(block); - def = nir_undef(b, phi->dest.ssa.num_components, - phi->dest.ssa.bit_size); + def = nir_undef(b, phi->def.num_components, + phi->def.bit_size); } else if (mov) { /* If the sources were all movs from the same source with the same * swizzle, then we can't just pick a random move because it may not @@ -124,7 +124,7 @@ remove_phis_block(nir_block *block, nir_builder *b) def = nir_mov_alu(b, mov->src[0], def->num_components); } - nir_def_rewrite_uses(&phi->dest.ssa, def); + nir_def_rewrite_uses(&phi->def, def); nir_instr_remove(&phi->instr); progress = true; diff --git a/src/compiler/nir/nir_opt_shrink_vectors.c b/src/compiler/nir/nir_opt_shrink_vectors.c index 9b01dd2..b7593e0 100644 --- a/src/compiler/nir/nir_opt_shrink_vectors.c +++ b/src/compiler/nir/nir_opt_shrink_vectors.c @@ -93,15 +93,15 @@ shrink_dest_to_read_mask(nir_def *def) static bool shrink_intrinsic_to_non_sparse(nir_intrinsic_instr *instr) { - unsigned mask = nir_def_components_read(&instr->dest.ssa); + unsigned mask = nir_def_components_read(&instr->def); int last_bit = util_last_bit(mask); /* If the sparse component is used, do nothing. */ - if (last_bit == instr->dest.ssa.num_components) + if (last_bit == instr->def.num_components) return false; - instr->dest.ssa.num_components -= 1; - instr->num_components = instr->dest.ssa.num_components; + instr->def.num_components -= 1; + instr->num_components = instr->def.num_components; /* Switch to the non-sparse intrinsic. */ switch (instr->intrinsic) { @@ -302,10 +302,10 @@ opt_shrink_vectors_intrinsic(nir_builder *b, nir_intrinsic_instr *instr) assert(instr->num_components != 0); /* Trim the dest to the used channels */ - if (!shrink_dest_to_read_mask(&instr->dest.ssa)) + if (!shrink_dest_to_read_mask(&instr->def)) return false; - instr->num_components = instr->dest.ssa.num_components; + instr->num_components = instr->def.num_components; return true; } case nir_intrinsic_image_sparse_load: @@ -323,14 +323,14 @@ opt_shrink_vectors_tex(nir_builder *b, nir_tex_instr *tex) if (!tex->is_sparse) return false; - unsigned mask = nir_def_components_read(&tex->dest.ssa); + unsigned mask = nir_def_components_read(&tex->def); int last_bit = util_last_bit(mask); /* If the sparse component is used, do nothing. */ - if (last_bit == tex->dest.ssa.num_components) + if (last_bit == tex->def.num_components) return false; - tex->dest.ssa.num_components -= 1; + tex->def.num_components -= 1; tex->is_sparse = false; return true; @@ -403,7 +403,7 @@ opt_shrink_vectors_ssa_undef(nir_undef_instr *instr) static bool opt_shrink_vectors_phi(nir_builder *b, nir_phi_instr *instr) { - nir_def *def = &instr->dest.ssa; + nir_def *def = &instr->def; /* early out if there's nothing to do. 
*/ if (def->num_components == 1) diff --git a/src/compiler/nir/nir_opt_uniform_atomics.c b/src/compiler/nir/nir_opt_uniform_atomics.c index cb53618..d460d35 100644 --- a/src/compiler/nir/nir_opt_uniform_atomics.c +++ b/src/compiler/nir/nir_opt_uniform_atomics.c @@ -257,10 +257,10 @@ optimize_atomic(nir_builder *b, nir_intrinsic_instr *intrin, bool return_prev) if (return_prev) { nir_push_else(b, nif); - nir_def *undef = nir_undef(b, 1, intrin->dest.ssa.bit_size); + nir_def *undef = nir_undef(b, 1, intrin->def.bit_size); nir_pop_if(b, nif); - nir_def *result = nir_if_phi(b, &intrin->dest.ssa, undef); + nir_def *result = nir_if_phi(b, &intrin->def, undef); result = nir_read_first_invocation(b, result); if (!combined_scan_reduce) @@ -282,13 +282,13 @@ optimize_and_rewrite_atomic(nir_builder *b, nir_intrinsic_instr *intrin) helper_nif = nir_push_if(b, nir_inot(b, helper)); } - ASSERTED bool original_result_divergent = intrin->dest.ssa.divergent; - bool return_prev = !nir_def_is_unused(&intrin->dest.ssa); + ASSERTED bool original_result_divergent = intrin->def.divergent; + bool return_prev = !nir_def_is_unused(&intrin->def); - nir_def old_result = intrin->dest.ssa; - list_replace(&intrin->dest.ssa.uses, &old_result.uses); - nir_def_init(&intrin->instr, &intrin->dest.ssa, 1, - intrin->dest.ssa.bit_size); + nir_def old_result = intrin->def; + list_replace(&intrin->def.uses, &old_result.uses); + nir_def_init(&intrin->instr, &intrin->def, 1, + intrin->def.bit_size); nir_def *result = optimize_atomic(b, intrin, return_prev); diff --git a/src/compiler/nir/nir_phi_builder.c b/src/compiler/nir/nir_phi_builder.c index 3d0171f..8fc5560 100644 --- a/src/compiler/nir/nir_phi_builder.c +++ b/src/compiler/nir/nir_phi_builder.c @@ -232,11 +232,11 @@ nir_phi_builder_value_get_block_def(struct nir_phi_builder_value *val, * be used. */ nir_phi_instr *phi = nir_phi_instr_create(val->builder->shader); - nir_def_init(&phi->instr, &phi->dest.ssa, val->num_components, + nir_def_init(&phi->instr, &phi->def, val->num_components, val->bit_size); phi->instr.block = dom; exec_list_push_tail(&val->phis, &phi->instr.node); - def = &phi->dest.ssa; + def = &phi->def; he->data = def; } else { /* In this case, we have an actual SSA def. 
It's either the result of a diff --git a/src/compiler/nir/nir_print.c b/src/compiler/nir/nir_print.c index aa21342..860c207 100644 --- a/src/compiler/nir/nir_print.c +++ b/src/compiler/nir/nir_print.c @@ -972,7 +972,7 @@ print_deref_instr(nir_deref_instr *instr, print_state *state) { FILE *fp = state->fp; - print_def(&instr->dest.ssa, state); + print_def(&instr->def, state); switch (instr->deref_type) { case nir_deref_type_var: @@ -1096,7 +1096,7 @@ print_intrinsic_instr(nir_intrinsic_instr *instr, print_state *state) FILE *fp = state->fp; if (info->has_dest) { - print_def(&instr->dest.ssa, state); + print_def(&instr->def, state); fprintf(fp, " = "); } else { print_no_dest_padding(state); @@ -1540,7 +1540,7 @@ print_tex_instr(nir_tex_instr *instr, print_state *state) { FILE *fp = state->fp; - print_def(&instr->dest.ssa, state); + print_def(&instr->def, state); fprintf(fp, " = ("); print_alu_type(instr->dest_type, state); @@ -1795,7 +1795,7 @@ static void print_phi_instr(nir_phi_instr *instr, print_state *state) { FILE *fp = state->fp; - print_def(&instr->dest.ssa, state); + print_def(&instr->def, state); fprintf(fp, " = phi "); nir_foreach_phi_src(src, instr) { if (&src->node != exec_list_get_head(&instr->srcs)) @@ -1818,7 +1818,7 @@ print_parallel_copy_instr(nir_parallel_copy_instr *instr, print_state *state) fprintf(fp, "*"); print_src(&entry->dest.reg, state, nir_type_invalid); } else { - print_def(&entry->dest.dest.ssa, state); + print_def(&entry->dest.def, state); } fprintf(fp, " = "); diff --git a/src/compiler/nir/nir_propagate_invariant.c b/src/compiler/nir/nir_propagate_invariant.c index 4b930a9..0322aad 100644 --- a/src/compiler/nir/nir_propagate_invariant.c +++ b/src/compiler/nir/nir_propagate_invariant.c @@ -91,7 +91,7 @@ propagate_invariant_instr(nir_instr *instr, struct set *invariants) case nir_instr_type_tex: { nir_tex_instr *tex = nir_instr_as_tex(instr); - if (def_is_invariant(&tex->dest.ssa, invariants)) + if (def_is_invariant(&tex->def, invariants)) nir_foreach_src(instr, add_src_cb, invariants); break; } @@ -106,7 +106,7 @@ propagate_invariant_instr(nir_instr *instr, struct set *invariants) break; case nir_intrinsic_load_deref: - if (def_is_invariant(&intrin->dest.ssa, invariants)) + if (def_is_invariant(&intrin->def, invariants)) add_var(nir_intrinsic_get_var(intrin, 0), invariants); break; @@ -130,7 +130,7 @@ propagate_invariant_instr(nir_instr *instr, struct set *invariants) case nir_instr_type_phi: { nir_phi_instr *phi = nir_instr_as_phi(instr); - if (!def_is_invariant(&phi->dest.ssa, invariants)) + if (!def_is_invariant(&phi->def, invariants)) break; nir_foreach_phi_src(src, phi) { diff --git a/src/compiler/nir/nir_range_analysis.c b/src/compiler/nir/nir_range_analysis.c index e666e05..da1f1bc 100644 --- a/src/compiler/nir/nir_range_analysis.c +++ b/src/compiler/nir/nir_range_analysis.c @@ -2141,7 +2141,7 @@ ssa_def_bits_used(const nir_def *def, int recur) case nir_intrinsic_quad_swap_vertical: case nir_intrinsic_quad_swap_diagonal: if (src_idx == 0) { - bits_used |= ssa_def_bits_used(&use_intrin->dest.ssa, recur); + bits_used |= ssa_def_bits_used(&use_intrin->def, recur); } else { if (use_intrin->intrinsic == nir_intrinsic_quad_broadcast) { bits_used |= 3; @@ -2162,7 +2162,7 @@ ssa_def_bits_used(const nir_def *def, int recur) case nir_op_ior: case nir_op_iand: case nir_op_ixor: - bits_used |= ssa_def_bits_used(&use_intrin->dest.ssa, recur); + bits_used |= ssa_def_bits_used(&use_intrin->def, recur); break; default: @@ -2179,7 +2179,7 @@ ssa_def_bits_used(const 
nir_def *def, int recur) case nir_instr_type_phi: { nir_phi_instr *use_phi = nir_instr_as_phi(src->parent_instr); - bits_used |= ssa_def_bits_used(&use_phi->dest.ssa, recur); + bits_used |= ssa_def_bits_used(&use_phi->def, recur); break; } diff --git a/src/compiler/nir/nir_remove_dead_variables.c b/src/compiler/nir/nir_remove_dead_variables.c index 853a132..43fa897 100644 --- a/src/compiler/nir/nir_remove_dead_variables.c +++ b/src/compiler/nir/nir_remove_dead_variables.c @@ -30,7 +30,7 @@ static bool deref_used_for_not_store(nir_deref_instr *deref) { - nir_foreach_use(src, &deref->dest.ssa) { + nir_foreach_use(src, &deref->def) { switch (src->parent_instr->type) { case nir_instr_type_deref: if (deref_used_for_not_store(nir_instr_as_deref(src->parent_instr))) diff --git a/src/compiler/nir/nir_repair_ssa.c b/src/compiler/nir/nir_repair_ssa.c index 4f3cc9c..ca34ec5 100644 --- a/src/compiler/nir/nir_repair_ssa.c +++ b/src/compiler/nir/nir_repair_ssa.c @@ -126,11 +126,11 @@ repair_ssa_def(nir_def *def, void *void_state) cast->parent = nir_src_for_ssa(block_def); cast->cast.ptr_stride = nir_deref_instr_array_stride(deref); - nir_def_init(&cast->instr, &cast->dest.ssa, def->num_components, + nir_def_init(&cast->instr, &cast->def, def->num_components, def->bit_size); nir_instr_insert(nir_before_instr(src->parent_instr), &cast->instr); - block_def = &cast->dest.ssa; + block_def = &cast->def; } if (src->is_if) diff --git a/src/compiler/nir/nir_schedule.c b/src/compiler/nir/nir_schedule.c index 2454523..8db3315 100644 --- a/src/compiler/nir/nir_schedule.c +++ b/src/compiler/nir/nir_schedule.c @@ -565,7 +565,7 @@ nir_schedule_regs_freed_load_reg(nir_intrinsic_instr *load, state->regs_freed += nir_schedule_reg_pressure(reg); } - nir_schedule_regs_freed_def_cb(&load->dest.ssa, state); + nir_schedule_regs_freed_def_cb(&load->def, state); } static void @@ -953,7 +953,7 @@ nir_schedule_mark_load_reg_scheduled(nir_intrinsic_instr *load, nir_schedule_mark_use(scoreboard, reg, &load->instr, nir_schedule_reg_pressure(reg)); - nir_schedule_mark_def_scheduled(&load->dest.ssa, scoreboard); + nir_schedule_mark_def_scheduled(&load->def, scoreboard); } static void diff --git a/src/compiler/nir/nir_serialize.c b/src/compiler/nir/nir_serialize.c index b3c74fe..05aeac8 100644 --- a/src/compiler/nir/nir_serialize.c +++ b/src/compiler/nir/nir_serialize.c @@ -910,7 +910,7 @@ write_deref(write_ctx *ctx, const nir_deref_instr *deref) header.deref.in_bounds = deref->arr.in_bounds; } - write_def(ctx, &deref->dest.ssa, header, deref->instr.type); + write_def(ctx, &deref->def, header, deref->instr.type); switch (deref->deref_type) { case nir_deref_type_var: @@ -962,7 +962,7 @@ read_deref(read_ctx *ctx, union packed_instr header) nir_deref_type deref_type = header.deref.deref_type; nir_deref_instr *deref = nir_deref_instr_create(ctx->nir, deref_type); - read_def(ctx, &deref->dest.ssa, &deref->instr, header); + read_def(ctx, &deref->def, &deref->instr, header); nir_deref_instr *parent; @@ -1077,7 +1077,7 @@ write_intrinsic(write_ctx *ctx, const nir_intrinsic_instr *intrin) } if (nir_intrinsic_infos[intrin->intrinsic].has_dest) - write_def(ctx, &intrin->dest.ssa, header, intrin->instr.type); + write_def(ctx, &intrin->def, header, intrin->instr.type); else blob_write_uint32(ctx->blob, header.u32); @@ -1112,7 +1112,7 @@ read_intrinsic(read_ctx *ctx, union packed_instr header) unsigned num_indices = nir_intrinsic_infos[op].num_indices; if (nir_intrinsic_infos[op].has_dest) - read_def(ctx, &intrin->dest.ssa, &intrin->instr, header); 
+ read_def(ctx, &intrin->def, &intrin->instr, header); for (unsigned i = 0; i < num_srcs; i++) read_src(ctx, &intrin->src[i]); @@ -1122,7 +1122,7 @@ read_intrinsic(read_ctx *ctx, union packed_instr header) */ if (nir_intrinsic_infos[op].has_dest && nir_intrinsic_infos[op].dest_components == 0) { - intrin->num_components = intrin->dest.ssa.num_components; + intrin->num_components = intrin->def.num_components; } else { for (unsigned i = 0; i < num_srcs; i++) { if (nir_intrinsic_infos[op].src_components[i] == 0) { @@ -1380,7 +1380,7 @@ write_tex(write_ctx *ctx, const nir_tex_instr *tex) header.tex.num_srcs = tex->num_srcs; header.tex.op = tex->op; - write_def(ctx, &tex->dest.ssa, header, tex->instr.type); + write_def(ctx, &tex->def, header, tex->instr.type); blob_write_uint32(ctx->blob, tex->texture_index); blob_write_uint32(ctx->blob, tex->sampler_index); @@ -1418,7 +1418,7 @@ read_tex(read_ctx *ctx, union packed_instr header) { nir_tex_instr *tex = nir_tex_instr_create(ctx->nir, header.tex.num_srcs); - read_def(ctx, &tex->dest.ssa, &tex->instr, header); + read_def(ctx, &tex->def, &tex->instr, header); tex->op = header.tex.op; tex->texture_index = blob_read_uint32(ctx->blob); @@ -1464,7 +1464,7 @@ write_phi(write_ctx *ctx, const nir_phi_instr *phi) * and then store enough information so that a later fixup pass can fill * them in correctly. */ - write_def(ctx, &phi->dest.ssa, header, phi->instr.type); + write_def(ctx, &phi->def, header, phi->instr.type); nir_foreach_phi_src(src, phi) { size_t blob_offset = blob_reserve_uint32(ctx->blob); @@ -1497,7 +1497,7 @@ read_phi(read_ctx *ctx, nir_block *blk, union packed_instr header) { nir_phi_instr *phi = nir_phi_instr_create(ctx->nir); - read_def(ctx, &phi->dest.ssa, &phi->instr, header); + read_def(ctx, &phi->def, &phi->instr, header); /* For similar reasons as before, we just store the index directly into the * pointer, and let a later pass resolve the phi sources. 
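The serializer and phi hunks above are the read and write sides of the same def-centric idiom that the sed rewrite applies to every pass in this commit. A minimal sketch of that idiom after the rename (illustrative only, not part of the patch — the pass and its imm-zero replacement are hypothetical, but nir_imm_zero(), nir_def_rewrite_uses() and nir_instr_remove() are the helpers used throughout this diff):

static bool
lower_foo_to_zero(nir_builder *b, nir_intrinsic_instr *intrin)
{
   b->cursor = nir_before_instr(&intrin->instr);

   /* The SSA def now lives directly on the instruction, so its shape is
    * read as intrin->def.* where it used to be intrin->dest.ssa.*.
    */
   nir_def *zero = nir_imm_zero(b, intrin->def.num_components,
                                intrin->def.bit_size);

   /* Point every user of the old def at the replacement value, then
    * drop the original instruction.
    */
   nir_def_rewrite_uses(&intrin->def, zero);
   nir_instr_remove(&intrin->instr);
   return true;
}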
diff --git a/src/compiler/nir/nir_split_64bit_vec3_and_vec4.c b/src/compiler/nir/nir_split_64bit_vec3_and_vec4.c index 0771559..e061e26 100644 --- a/src/compiler/nir/nir_split_64bit_vec3_and_vec4.c +++ b/src/compiler/nir/nir_split_64bit_vec3_and_vec4.c @@ -48,12 +48,12 @@ nir_split_64bit_vec3_and_vec4_filter(const nir_instr *instr, switch (intr->intrinsic) { case nir_intrinsic_load_deref: { - if (intr->dest.ssa.bit_size != 64) + if (intr->def.bit_size != 64) return false; nir_variable *var = nir_intrinsic_get_var(intr, 0); if (var->data.mode != nir_var_function_temp) return false; - return intr->dest.ssa.num_components >= 3; + return intr->def.num_components >= 3; } case nir_intrinsic_store_deref: { if (nir_src_bit_size(intr->src[1]) != 64) @@ -69,9 +69,9 @@ nir_split_64bit_vec3_and_vec4_filter(const nir_instr *instr, } case nir_instr_type_phi: { nir_phi_instr *phi = nir_instr_as_phi(instr); - if (phi->dest.ssa.bit_size != 64) + if (phi->def.bit_size != 64) return false; - return phi->dest.ssa.num_components >= 3; + return phi->def.num_components >= 3; } default: @@ -107,7 +107,7 @@ get_linear_array_offset(nir_builder *b, nir_deref_instr *deref) nir_deref_path path; nir_deref_path_init(&path, deref, NULL); - nir_def *offset = nir_imm_intN_t(b, 0, deref->dest.ssa.bit_size); + nir_def *offset = nir_imm_intN_t(b, 0, deref->def.bit_size); for (nir_deref_instr **p = &path.path[1]; *p; p++) { switch ((*p)->deref_type) { case nir_deref_type_array: { @@ -181,8 +181,8 @@ split_load_deref(nir_builder *b, nir_intrinsic_instr *intr, deref2 = nir_build_deref_array(b, deref2, offset); } - nir_def *load1 = nir_build_load_deref(b, 2, 64, &deref1->dest.ssa, 0); - nir_def *load2 = nir_build_load_deref(b, old_components - 2, 64, &deref2->dest.ssa, 0); + nir_def *load1 = nir_build_load_deref(b, 2, 64, &deref1->def, 0); + nir_def *load2 = nir_build_load_deref(b, old_components - 2, 64, &deref2->def, 0); return merge_to_vec3_or_vec4(b, load1, load2); } @@ -206,14 +206,14 @@ split_store_deref(nir_builder *b, nir_intrinsic_instr *intr, int write_mask_xy = nir_intrinsic_write_mask(intr) & 3; if (write_mask_xy) { nir_def *src_xy = nir_trim_vector(b, intr->src[1].ssa, 2); - nir_build_store_deref(b, &deref_xy->dest.ssa, src_xy, write_mask_xy); + nir_build_store_deref(b, &deref_xy->def, src_xy, write_mask_xy); } int write_mask_zw = nir_intrinsic_write_mask(intr) & 0xc; if (write_mask_zw) { nir_def *src_zw = nir_channels(b, intr->src[1].ssa, nir_component_mask(intr->src[1].ssa->num_components) & 0xc); - nir_build_store_deref(b, &deref_zw->dest.ssa, src_zw, write_mask_zw >> 2); + nir_build_store_deref(b, &deref_zw->def, src_zw, write_mask_zw >> 2); } return NIR_LOWER_INSTR_PROGRESS_REPLACE; @@ -222,20 +222,20 @@ split_store_deref(nir_builder *b, nir_intrinsic_instr *intr, static nir_def * split_phi(nir_builder *b, nir_phi_instr *phi) { - nir_op vec_op = nir_op_vec(phi->dest.ssa.num_components); + nir_op vec_op = nir_op_vec(phi->def.num_components); nir_alu_instr *vec = nir_alu_instr_create(b->shader, vec_op); nir_def_init(&vec->instr, &vec->def, - phi->dest.ssa.num_components, 64); + phi->def.num_components, 64); - int num_comp[2] = { 2, phi->dest.ssa.num_components - 2 }; + int num_comp[2] = { 2, phi->def.num_components - 2 }; nir_phi_instr *new_phi[2]; for (unsigned i = 0; i < 2; i++) { new_phi[i] = nir_phi_instr_create(b->shader); - nir_def_init(&new_phi[i]->instr, &new_phi[i]->dest.ssa, num_comp[i], - phi->dest.ssa.bit_size); + nir_def_init(&new_phi[i]->instr, &new_phi[i]->def, num_comp[i], + phi->def.bit_size); 
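/* The two replacement phis are sized with nir_def_init() on the embedded
 * def (formerly dest.ssa): num_comp[] gives two components to the xy half
 * and the remaining one or two to zw.  The nir_foreach_phi_src loop below
 * then splits every source of the original wide phi between them.
 */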
nir_foreach_phi_src(src, phi) { /* Insert at the end of the predecessor but before the jump @@ -256,7 +256,7 @@ split_phi(nir_builder *b, nir_phi_instr *phi) } b->cursor = nir_after_instr(&phi->instr); - return merge_to_vec3_or_vec4(b, &new_phi[0]->dest.ssa, &new_phi[1]->dest.ssa); + return merge_to_vec3_or_vec4(b, &new_phi[0]->def, &new_phi[1]->def); }; static nir_def * diff --git a/src/compiler/nir/nir_split_per_member_structs.c b/src/compiler/nir/nir_split_per_member_structs.c index f65ca2f..4d87dd3 100644 --- a/src/compiler/nir/nir_split_per_member_structs.c +++ b/src/compiler/nir/nir_split_per_member_structs.c @@ -149,8 +149,8 @@ rewrite_deref_instr(nir_builder *b, nir_instr *instr, void *cb_data) b->cursor = nir_before_instr(&deref->instr); nir_deref_instr *member_deref = build_member_deref(b, nir_deref_instr_parent(deref), member); - nir_def_rewrite_uses(&deref->dest.ssa, - &member_deref->dest.ssa); + nir_def_rewrite_uses(&deref->def, + &member_deref->def); /* The referenced variable is no longer valid, clean up the deref */ nir_deref_instr_remove_if_unused(deref); diff --git a/src/compiler/nir/nir_split_vars.c b/src/compiler/nir/nir_split_vars.c index 02a3776..f2d1d4e 100644 --- a/src/compiler/nir/nir_split_vars.c +++ b/src/compiler/nir/nir_split_vars.c @@ -312,8 +312,8 @@ split_struct_derefs_impl(nir_function_impl *impl, } assert(new_deref->type == deref->type); - nir_def_rewrite_uses(&deref->dest.ssa, - &new_deref->dest.ssa); + nir_def_rewrite_uses(&deref->def, + &new_deref->def); nir_deref_instr_remove_if_unused(deref); } } @@ -834,9 +834,9 @@ split_array_access_impl(nir_function_impl *impl, */ if (intrin->intrinsic == nir_intrinsic_load_deref) { nir_def *u = - nir_undef(&b, intrin->dest.ssa.num_components, - intrin->dest.ssa.bit_size); - nir_def_rewrite_uses(&intrin->dest.ssa, + nir_undef(&b, intrin->def.num_components, + intrin->def.bit_size); + nir_def_rewrite_uses(&intrin->def, u); } nir_instr_remove(&intrin->instr); @@ -867,7 +867,7 @@ split_array_access_impl(nir_function_impl *impl, /* Rewrite the deref source to point to the split one */ nir_instr_rewrite_src(&intrin->instr, &intrin->src[d], - nir_src_for_ssa(&new_deref->dest.ssa)); + nir_src_for_ssa(&new_deref->def)); nir_deref_instr_remove_if_unused(deref); } } @@ -1242,7 +1242,7 @@ find_used_components_impl(nir_function_impl *impl, switch (intrin->intrinsic) { case nir_intrinsic_load_deref: mark_deref_used(nir_src_as_deref(intrin->src[0]), - nir_def_components_read(&intrin->dest.ssa), 0, + nir_def_components_read(&intrin->def), 0, NULL, var_usage_map, modes, mem_ctx); break; @@ -1552,9 +1552,9 @@ shrink_vec_var_access_impl(nir_function_impl *impl, if (usage->comps_kept == 0 || vec_deref_is_oob(deref, usage)) { if (intrin->intrinsic == nir_intrinsic_load_deref) { nir_def *u = - nir_undef(&b, intrin->dest.ssa.num_components, - intrin->dest.ssa.bit_size); - nir_def_rewrite_uses(&intrin->dest.ssa, + nir_undef(&b, intrin->def.num_components, + intrin->def.bit_size); + nir_def_rewrite_uses(&intrin->def, u); } nir_instr_remove(&intrin->instr); @@ -1572,27 +1572,27 @@ shrink_vec_var_access_impl(nir_function_impl *impl, b.cursor = nir_after_instr(&intrin->instr); nir_def *undef = - nir_undef(&b, 1, intrin->dest.ssa.bit_size); + nir_undef(&b, 1, intrin->def.bit_size); nir_def *vec_srcs[NIR_MAX_VEC_COMPONENTS]; unsigned c = 0; for (unsigned i = 0; i < intrin->num_components; i++) { if (usage->comps_kept & (1u << i)) - vec_srcs[i] = nir_channel(&b, &intrin->dest.ssa, c++); + vec_srcs[i] = nir_channel(&b, &intrin->def, c++); else 
vec_srcs[i] = undef; } nir_def *vec = nir_vec(&b, vec_srcs, intrin->num_components); - nir_def_rewrite_uses_after(&intrin->dest.ssa, + nir_def_rewrite_uses_after(&intrin->def, vec, vec->parent_instr); /* The SSA def is now only used by the swizzle. It's safe to * shrink the number of components. */ - assert(list_length(&intrin->dest.ssa.uses) == c); + assert(list_length(&intrin->def.uses) == c); intrin->num_components = c; - intrin->dest.ssa.num_components = c; + intrin->def.num_components = c; } else { nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin); diff --git a/src/compiler/nir/nir_to_lcssa.c b/src/compiler/nir/nir_to_lcssa.c index 6159789..6ef71f8 100644 --- a/src/compiler/nir/nir_to_lcssa.c +++ b/src/compiler/nir/nir_to_lcssa.c @@ -225,7 +225,7 @@ convert_loop_exit_for_ssa(nir_def *def, void *void_state) /* Initialize a phi-instruction */ nir_phi_instr *phi = nir_phi_instr_create(state->shader); - nir_def_init(&phi->instr, &phi->dest.ssa, def->num_components, + nir_def_init(&phi->instr, &phi->def, def->num_components, def->bit_size); /* Create a phi node with as many sources pointing to the same ssa_def as @@ -237,7 +237,7 @@ convert_loop_exit_for_ssa(nir_def *def, void *void_state) } nir_instr_insert_before_block(state->block_after_loop, &phi->instr); - nir_def *dest = &phi->dest.ssa; + nir_def *dest = &phi->def; /* deref instructions need a cast after the phi */ if (def->parent_instr->type == nir_instr_type_deref) { @@ -247,13 +247,13 @@ convert_loop_exit_for_ssa(nir_def *def, void *void_state) nir_deref_instr *instr = nir_instr_as_deref(def->parent_instr); cast->modes = instr->modes; cast->type = instr->type; - cast->parent = nir_src_for_ssa(&phi->dest.ssa); + cast->parent = nir_src_for_ssa(&phi->def); cast->cast.ptr_stride = nir_deref_instr_array_stride(instr); - nir_def_init(&cast->instr, &cast->dest.ssa, - phi->dest.ssa.num_components, phi->dest.ssa.bit_size); + nir_def_init(&cast->instr, &cast->def, + phi->def.num_components, phi->def.bit_size); nir_instr_insert(nir_after_phis(state->block_after_loop), &cast->instr); - dest = &cast->dest.ssa; + dest = &cast->def; } /* Run through all uses and rewrite those outside the loop to point to diff --git a/src/compiler/nir/nir_trivialize_registers.c b/src/compiler/nir/nir_trivialize_registers.c index dc6526c..196eabf 100644 --- a/src/compiler/nir/nir_trivialize_registers.c +++ b/src/compiler/nir/nir_trivialize_registers.c @@ -99,11 +99,11 @@ trivialize_load(nir_intrinsic_instr *load) assert(nir_is_load_reg(load)); nir_builder b = nir_builder_at(nir_after_instr(&load->instr)); - nir_def *copy = nir_mov(&b, &load->dest.ssa); - copy->divergent = load->dest.ssa.divergent; - nir_def_rewrite_uses_after(&load->dest.ssa, copy, copy->parent_instr); + nir_def *copy = nir_mov(&b, &load->def); + copy->divergent = load->def.divergent; + nir_def_rewrite_uses_after(&load->def, copy, copy->parent_instr); - assert(list_is_singular(&load->dest.ssa.uses)); + assert(list_is_singular(&load->def.uses)); } struct trivialize_src_state { @@ -304,7 +304,7 @@ trivialize_read_after_write(nir_intrinsic_instr *load, { assert(nir_is_load_reg(load)); - unsigned nr = load->dest.ssa.num_components; + unsigned nr = load->def.num_components; trivialize_reg_stores(load->src[0].ssa, nir_component_mask(nr), possibly_trivial_stores); } diff --git a/src/compiler/nir/nir_validate.c b/src/compiler/nir/nir_validate.c index b400632..f2ab225 100644 --- a/src/compiler/nir/nir_validate.c +++ b/src/compiler/nir/nir_validate.c @@ -316,8 +316,8 @@ 
validate_deref_instr(nir_deref_instr *instr, validate_state *state) /* The parent pointer value must have the same number of components * as the destination. */ - validate_src(&instr->parent, state, instr->dest.ssa.bit_size, - instr->dest.ssa.num_components); + validate_src(&instr->parent, state, instr->def.bit_size, + instr->def.num_components); nir_instr *parent_instr = instr->parent.ssa->parent_instr; @@ -357,7 +357,7 @@ validate_deref_instr(nir_deref_instr *instr, validate_state *state) if (instr->deref_type == nir_deref_type_array) { validate_src(&instr->arr.index, state, - instr->dest.ssa.bit_size, 1); + instr->def.bit_size, 1); } break; @@ -371,7 +371,7 @@ validate_deref_instr(nir_deref_instr *instr, validate_state *state) parent->deref_type == nir_deref_type_ptr_as_array || parent->deref_type == nir_deref_type_cast); validate_src(&instr->arr.index, state, - instr->dest.ssa.bit_size, 1); + instr->def.bit_size, 1); break; default: @@ -383,12 +383,12 @@ validate_deref_instr(nir_deref_instr *instr, validate_state *state) * want to let other compiler components such as SPIR-V decide how big * pointers should be. */ - validate_def(&instr->dest.ssa, state, 0, 0); + validate_def(&instr->def, state, 0, 0); /* Certain modes cannot be used as sources for phi instructions because * way too many passes assume that they can always chase deref chains. */ - nir_foreach_use_including_if(use, &instr->dest.ssa) { + nir_foreach_use_including_if(use, &instr->def) { /* Deref instructions as if conditions don't make sense because if * conditions expect well-formed Booleans. If you want to compare with * NULL, an explicit comparison operation should be used. @@ -477,8 +477,8 @@ validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state) case nir_intrinsic_load_reg: case nir_intrinsic_load_reg_indirect: validate_register_handle(instr->src[0], - instr->dest.ssa.num_components, - instr->dest.ssa.bit_size, state); + instr->def.num_components, + instr->def.bit_size, state); break; case nir_intrinsic_store_reg: @@ -552,7 +552,7 @@ validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state) } case nir_intrinsic_load_ubo_vec4: { - int bit_size = instr->dest.ssa.bit_size; + int bit_size = instr->def.bit_size; validate_assert(state, bit_size >= 8); validate_assert(state, (nir_intrinsic_component(instr) + instr->num_components) * @@ -587,7 +587,7 @@ validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state) case nir_intrinsic_load_per_primitive_output: case nir_intrinsic_load_push_constant: /* All memory load operations must load at least a byte */ - validate_assert(state, instr->dest.ssa.bit_size >= 8); + validate_assert(state, instr->def.bit_size >= 8); break; case nir_intrinsic_store_ssbo: @@ -645,7 +645,7 @@ validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state) } validate_assert(state, allowed); - validate_assert(state, instr->dest.ssa.bit_size == + validate_assert(state, instr->def.bit_size == util_format_get_blocksizebits(format)); } break; @@ -691,7 +691,7 @@ validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state) else dest_bit_size = dest_bit_size ? 
dest_bit_size : bit_sizes; - validate_def(&instr->dest.ssa, state, dest_bit_size, components_written); + validate_def(&instr->def, state, dest_bit_size, components_written); } if (!vectorized_intrinsic(instr)) @@ -847,12 +847,12 @@ validate_tex_instr(nir_tex_instr *instr, validate_state *state) if (instr->is_gather_implicit_lod) validate_assert(state, instr->op == nir_texop_tg4); - validate_def(&instr->dest.ssa, state, 0, nir_tex_instr_dest_size(instr)); + validate_def(&instr->def, state, 0, nir_tex_instr_dest_size(instr)); unsigned bit_size = nir_alu_type_get_type_size(instr->dest_type); validate_assert(state, (bit_size ? bit_size : 32) == - instr->dest.ssa.bit_size); + instr->def.bit_size); } static void @@ -924,7 +924,7 @@ validate_phi_instr(nir_phi_instr *instr, validate_state *state) * basic blocks, to avoid validating an SSA use before its definition. */ - validate_def(&instr->dest.ssa, state, 0, 0); + validate_def(&instr->def, state, 0, 0); exec_list_validate(&instr->srcs); validate_assert(state, exec_list_length(&instr->srcs) == @@ -1055,8 +1055,8 @@ validate_phi_src(nir_phi_instr *instr, nir_block *pred, validate_state *state) exec_list_validate(&instr->srcs); nir_foreach_phi_src(src, instr) { if (src->pred == pred) { - validate_src(&src->src, state, instr->dest.ssa.bit_size, - instr->dest.ssa.num_components); + validate_src(&src->src, state, instr->def.bit_size, + instr->def.num_components); state->instr = NULL; return; } diff --git a/src/compiler/nir/tests/algebraic_tests.cpp b/src/compiler/nir/tests/algebraic_tests.cpp index a616b17..65e15a5 100644 --- a/src/compiler/nir/tests/algebraic_tests.cpp +++ b/src/compiler/nir/tests/algebraic_tests.cpp @@ -49,7 +49,7 @@ algebraic_test_base::algebraic_test_base() void algebraic_test_base::test_op(nir_op op, nir_def *src0, nir_def *src1, nir_def *src2, nir_def *src3, const char *desc) { - nir_def *res_deref = &nir_build_deref_var(b, res_var)->dest.ssa; + nir_def *res_deref = &nir_build_deref_var(b, res_var)->def; /* create optimized expression */ nir_intrinsic_instr *optimized_instr = nir_build_store_deref( diff --git a/src/compiler/nir/tests/builder_tests.cpp b/src/compiler/nir/tests/builder_tests.cpp index c62f9fe..576f430 100644 --- a/src/compiler/nir/tests/builder_tests.cpp +++ b/src/compiler/nir/tests/builder_tests.cpp @@ -51,7 +51,7 @@ protected: nir_intrinsic_instr *store = nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_deref); store->num_components = val->num_components; - store->src[0] = nir_src_for_ssa(&nir_build_deref_var(b, var)->dest.ssa); + store->src[0] = nir_src_for_ssa(&nir_build_deref_var(b, var)->def); store->src[1] = nir_src_for_ssa(val); nir_intrinsic_set_write_mask(store, ((1 << val->num_components) - 1)); nir_builder_instr_insert(b, &store->instr); diff --git a/src/compiler/nir/tests/dce_tests.cpp b/src/compiler/nir/tests/dce_tests.cpp index 79554e1..660d26c 100644 --- a/src/compiler/nir/tests/dce_tests.cpp +++ b/src/compiler/nir/tests/dce_tests.cpp @@ -44,7 +44,7 @@ nir_phi_instr *create_one_source_phi(nir_shader *shader, nir_block *pred, { nir_phi_instr *phi = nir_phi_instr_create(shader); nir_phi_instr_add_src(phi, pred, nir_src_for_ssa(def)); - nir_def_init(&phi->instr, &phi->dest.ssa, def->num_components, + nir_def_init(&phi->instr, &phi->def, def->num_components, def->bit_size); return phi; @@ -88,7 +88,7 @@ TEST_F(nir_opt_dce_test, return_before_loop) nir_phi_instr *phi = create_one_source_phi(b->shader, one->parent_instr->block, one); nir_instr_insert_before_block(one->parent_instr->block, 
&phi->instr); - nir_store_var(b, var, &phi->dest.ssa, 0x1); + nir_store_var(b, var, &phi->def, 0x1); nir_pop_loop(b, loop); diff --git a/src/compiler/nir/tests/load_store_vectorizer_tests.cpp b/src/compiler/nir/tests/load_store_vectorizer_tests.cpp index 5460585..ff707f7 100644 --- a/src/compiler/nir/tests/load_store_vectorizer_tests.cpp +++ b/src/compiler/nir/tests/load_store_vectorizer_tests.cpp @@ -25,7 +25,7 @@ /* This is a macro so you get good line numbers */ #define EXPECT_INSTR_SWIZZLES(instr, load, expected_swizzle) \ - EXPECT_EQ((instr)->src[0].src.ssa, &(load)->dest.ssa); \ + EXPECT_EQ((instr)->src[0].src.ssa, &(load)->def); \ EXPECT_EQ(swizzle(instr, 0), expected_swizzle); namespace { @@ -162,7 +162,7 @@ nir_load_store_vectorize_test::get_resource(uint32_t binding, bool ssbo) nir_intrinsic_instr *res = nir_intrinsic_instr_create( b->shader, nir_intrinsic_vulkan_resource_index); - nir_def_init(&res->instr, &res->dest.ssa, 1, 32); + nir_def_init(&res->instr, &res->def, 1, 32); res->num_components = 1; res->src[0] = nir_src_for_ssa(nir_imm_zero(b, 1, 32)); nir_intrinsic_set_desc_type( @@ -170,8 +170,8 @@ nir_load_store_vectorize_test::get_resource(uint32_t binding, bool ssbo) nir_intrinsic_set_desc_set(res, 0); nir_intrinsic_set_binding(res, binding); nir_builder_instr_insert(b, &res->instr); - res_map[binding] = &res->dest.ssa; - return &res->dest.ssa; + res_map[binding] = &res->def; + return &res->def; } nir_intrinsic_instr * @@ -197,7 +197,7 @@ nir_load_store_vectorize_test::create_indirect_load( return NULL; } nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, intrinsic); - nir_def_init(&load->instr, &load->dest.ssa, components, bit_size); + nir_def_init(&load->instr, &load->def, components, bit_size); load->num_components = components; if (res) { load->src[0] = nir_src_for_ssa(res); @@ -227,7 +227,7 @@ nir_load_store_vectorize_test::create_indirect_load( } nir_builder_instr_insert(b, &load->instr); - nir_alu_instr *mov = nir_instr_as_alu(nir_mov(b, &load->dest.ssa)->parent_instr); + nir_alu_instr *mov = nir_instr_as_alu(nir_mov(b, &load->def)->parent_instr); movs[id] = mov; loads[id] = &mov->src[0]; @@ -258,7 +258,7 @@ nir_load_store_vectorize_test::create_indirect_store( return; } nir_intrinsic_instr *store = nir_intrinsic_instr_create(b->shader, intrinsic); - nir_def_init(&store->instr, &store->dest.ssa, components, bit_size); + nir_def_init(&store->instr, &store->def, components, bit_size); store->num_components = components; if (res) { store->src[0] = nir_src_for_ssa(value); @@ -376,8 +376,8 @@ TEST_F(nir_load_store_vectorize_test, ubo_load_adjacent) ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ubo), 1); nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ubo, 0); - ASSERT_EQ(load->dest.ssa.bit_size, 32); - ASSERT_EQ(load->dest.ssa.num_components, 2); + ASSERT_EQ(load->def.bit_size, 32); + ASSERT_EQ(load->def.num_components, 2); ASSERT_EQ(nir_intrinsic_range_base(load), 0); ASSERT_EQ(nir_intrinsic_range(load), 8); ASSERT_EQ(nir_src_as_uint(load->src[1]), 0); @@ -398,8 +398,8 @@ TEST_F(nir_load_store_vectorize_test, ubo_load_intersecting) ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ubo), 1); nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ubo, 0); - ASSERT_EQ(load->dest.ssa.bit_size, 32); - ASSERT_EQ(load->dest.ssa.num_components, 3); + ASSERT_EQ(load->def.bit_size, 32); + ASSERT_EQ(load->def.num_components, 3); ASSERT_EQ(nir_intrinsic_range_base(load), 0); ASSERT_EQ(nir_intrinsic_range(load), 12); 
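/* The two overlapping loads have been fused into a single vec3 UBO load:
 * the merged shape is checked on load->def, and the 12-byte range at
 * range_base 0 covers the union of the original loads.
 */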
ASSERT_EQ(nir_src_as_uint(load->src[1]), 0); @@ -421,13 +421,13 @@ TEST_F(nir_load_store_vectorize_test, ubo_load_intersecting_range) ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ubo), 1); nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ubo, 0); - ASSERT_EQ(load->dest.ssa.bit_size, 32); - ASSERT_EQ(load->dest.ssa.num_components, 4); + ASSERT_EQ(load->def.bit_size, 32); + ASSERT_EQ(load->def.num_components, 4); ASSERT_EQ(nir_intrinsic_range_base(load), 0); ASSERT_EQ(nir_intrinsic_range(load), 16); ASSERT_EQ(nir_src_as_uint(load->src[1]), 0); - ASSERT_EQ(loads[0x1]->src.ssa, &load->dest.ssa); - ASSERT_EQ(loads[0x2]->src.ssa, &load->dest.ssa); + ASSERT_EQ(loads[0x1]->src.ssa, &load->def); + ASSERT_EQ(loads[0x2]->src.ssa, &load->def); ASSERT_EQ(loads[0x1]->swizzle[0], 0); ASSERT_EQ(loads[0x1]->swizzle[1], 1); ASSERT_EQ(loads[0x1]->swizzle[2], 2); @@ -448,13 +448,13 @@ TEST_F(nir_load_store_vectorize_test, ubo_load_identical) ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ubo), 1); nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ubo, 0); - ASSERT_EQ(load->dest.ssa.bit_size, 32); - ASSERT_EQ(load->dest.ssa.num_components, 1); + ASSERT_EQ(load->def.bit_size, 32); + ASSERT_EQ(load->def.num_components, 1); ASSERT_EQ(nir_intrinsic_range_base(load), 0); ASSERT_EQ(nir_intrinsic_range(load), 4); ASSERT_EQ(nir_src_as_uint(load->src[1]), 0); - ASSERT_EQ(loads[0x1]->src.ssa, &load->dest.ssa); - ASSERT_EQ(loads[0x2]->src.ssa, &load->dest.ssa); + ASSERT_EQ(loads[0x1]->src.ssa, &load->def); + ASSERT_EQ(loads[0x2]->src.ssa, &load->def); EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x"); EXPECT_INSTR_SWIZZLES(movs[0x2], load, "x"); } @@ -486,8 +486,8 @@ TEST_F(nir_load_store_vectorize_test, push_const_load_adjacent) ASSERT_EQ(count_intrinsics(nir_intrinsic_load_push_constant), 1); nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_push_constant, 0); - ASSERT_EQ(load->dest.ssa.bit_size, 32); - ASSERT_EQ(load->dest.ssa.num_components, 2); + ASSERT_EQ(load->def.bit_size, 32); + ASSERT_EQ(load->def.num_components, 2); ASSERT_EQ(nir_src_as_uint(load->src[0]), 0); EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x"); EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y"); @@ -506,8 +506,8 @@ TEST_F(nir_load_store_vectorize_test, push_const_load_adjacent_base) ASSERT_EQ(count_intrinsics(nir_intrinsic_load_push_constant), 1); nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_push_constant, 0); - ASSERT_EQ(load->dest.ssa.bit_size, 32); - ASSERT_EQ(load->dest.ssa.num_components, 2); + ASSERT_EQ(load->def.bit_size, 32); + ASSERT_EQ(load->def.num_components, 2); ASSERT_EQ(nir_src_as_uint(load->src[0]), 0); EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x"); EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y"); @@ -526,8 +526,8 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent) ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 1); nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ssbo, 0); - ASSERT_EQ(load->dest.ssa.bit_size, 32); - ASSERT_EQ(load->dest.ssa.num_components, 2); + ASSERT_EQ(load->def.bit_size, 32); + ASSERT_EQ(load->def.num_components, 2); ASSERT_EQ(nir_src_as_uint(load->src[1]), 0); EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x"); EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y"); @@ -547,8 +547,8 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_indirect) ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 1); nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ssbo, 0); - ASSERT_EQ(load->dest.ssa.bit_size, 32); - ASSERT_EQ(load->dest.ssa.num_components, 2); + 
ASSERT_EQ(load->def.bit_size, 32); + ASSERT_EQ(load->def.num_components, 2); ASSERT_EQ(load->src[1].ssa, index_base); EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x"); EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y"); @@ -569,8 +569,8 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_indirect_sub) ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 1); nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ssbo, 0); - ASSERT_EQ(load->dest.ssa.bit_size, 32); - ASSERT_EQ(load->dest.ssa.num_components, 2); + ASSERT_EQ(load->def.bit_size, 32); + ASSERT_EQ(load->def.num_components, 2); ASSERT_EQ(load->src[1].ssa, index_base_prev); EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x"); EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y"); @@ -593,8 +593,8 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_indirect_neg_stride) ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 1); nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ssbo, 0); - ASSERT_EQ(load->dest.ssa.bit_size, 32); - ASSERT_EQ(load->dest.ssa.num_components, 2); + ASSERT_EQ(load->def.bit_size, 32); + ASSERT_EQ(load->def.num_components, 2); EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x"); EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y"); @@ -621,8 +621,8 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_identical_store_adjacent) ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 1); nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ssbo, 0); - ASSERT_EQ(load->dest.ssa.bit_size, 32); - ASSERT_EQ(load->dest.ssa.num_components, 1); + ASSERT_EQ(load->def.bit_size, 32); + ASSERT_EQ(load->def.num_components, 1); ASSERT_EQ(nir_src_as_uint(load->src[1]), 0); EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x"); EXPECT_INSTR_SWIZZLES(movs[0x3], load, "x"); @@ -691,8 +691,8 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_store_identical) ASSERT_EQ(count_intrinsics(nir_intrinsic_store_ssbo), 1); nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ssbo, 0); - ASSERT_EQ(load->dest.ssa.bit_size, 32); - ASSERT_EQ(load->dest.ssa.num_components, 2); + ASSERT_EQ(load->def.bit_size, 32); + ASSERT_EQ(load->def.num_components, 2); ASSERT_EQ(nir_src_as_uint(load->src[1]), 0); EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x"); EXPECT_INSTR_SWIZZLES(movs[0x3], load, "y"); @@ -918,8 +918,8 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_8_8_16) ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 1); nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ssbo, 0); - ASSERT_EQ(load->dest.ssa.bit_size, 8); - ASSERT_EQ(load->dest.ssa.num_components, 4); + ASSERT_EQ(load->def.bit_size, 8); + ASSERT_EQ(load->def.num_components, 4); ASSERT_EQ(nir_src_as_uint(load->src[1]), 0); EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x"); EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y"); @@ -934,8 +934,8 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_8_8_16) high = nir_instr_as_alu(high->parent_instr)->src[0].src.ssa; ASSERT_TRUE(test_alu(low->parent_instr, nir_op_u2u16)); ASSERT_TRUE(test_alu(high->parent_instr, nir_op_u2u16)); - ASSERT_TRUE(test_alu_def(low->parent_instr, 0, &load->dest.ssa, 2)); - ASSERT_TRUE(test_alu_def(high->parent_instr, 0, &load->dest.ssa, 3)); + ASSERT_TRUE(test_alu_def(low->parent_instr, 0, &load->def, 2)); + ASSERT_TRUE(test_alu_def(high->parent_instr, 0, &load->def, 3)); } TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_32_32_64) @@ -951,8 +951,8 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_32_32_64) ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 1); nir_intrinsic_instr *load 
= get_intrinsic(nir_intrinsic_load_ssbo, 0); - ASSERT_EQ(load->dest.ssa.bit_size, 32); - ASSERT_EQ(load->dest.ssa.num_components, 4); + ASSERT_EQ(load->def.bit_size, 32); + ASSERT_EQ(load->def.num_components, 4); ASSERT_EQ(nir_src_as_uint(load->src[1]), 0); EXPECT_INSTR_SWIZZLES(movs[0x1], load, "xy"); @@ -978,8 +978,8 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_32_32_64_64) ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 1); nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ssbo, 0); - ASSERT_EQ(load->dest.ssa.bit_size, 64); - ASSERT_EQ(load->dest.ssa.num_components, 3); + ASSERT_EQ(load->def.bit_size, 64); + ASSERT_EQ(load->def.num_components, 3); ASSERT_EQ(nir_src_as_uint(load->src[1]), 0); EXPECT_INSTR_SWIZZLES(movs[0x3], load, "z"); @@ -1011,8 +1011,8 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_intersecting_32_32_64) ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 1); nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ssbo, 0); - ASSERT_EQ(load->dest.ssa.bit_size, 32); - ASSERT_EQ(load->dest.ssa.num_components, 3); + ASSERT_EQ(load->def.bit_size, 32); + ASSERT_EQ(load->def.num_components, 3); ASSERT_EQ(nir_src_as_uint(load->src[1]), 4); EXPECT_INSTR_SWIZZLES(movs[0x1], load, "xy"); @@ -1178,8 +1178,8 @@ TEST_F(nir_load_store_vectorize_test, shared_load_adjacent) ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1); nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_deref, 0); - ASSERT_EQ(load->dest.ssa.bit_size, 32); - ASSERT_EQ(load->dest.ssa.num_components, 2); + ASSERT_EQ(load->def.bit_size, 32); + ASSERT_EQ(load->def.num_components, 2); deref = nir_src_as_deref(load->src[0]); ASSERT_EQ(deref->deref_type, nir_deref_type_cast); @@ -1200,7 +1200,7 @@ TEST_F(nir_load_store_vectorize_test, shared_load_distant_64bit) { nir_variable *var = nir_variable_create(b->shader, nir_var_mem_shared, glsl_array_type(glsl_uint_type(), 4, 0), "var"); nir_deref_instr *deref = nir_build_deref_var(b, var); - nir_def_init(&deref->instr, &deref->dest.ssa, 1, 64); + nir_def_init(&deref->instr, &deref->def, 1, 64); create_shared_load(nir_build_deref_array_imm(b, deref, 0x100000000), 0x1); create_shared_load(nir_build_deref_array_imm(b, deref, 0x200000001), 0x2); @@ -1230,8 +1230,8 @@ TEST_F(nir_load_store_vectorize_test, shared_load_adjacent_indirect) ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1); nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_deref, 0); - ASSERT_EQ(load->dest.ssa.bit_size, 32); - ASSERT_EQ(load->dest.ssa.num_components, 2); + ASSERT_EQ(load->def.bit_size, 32); + ASSERT_EQ(load->def.num_components, 2); deref = nir_src_as_deref(load->src[0]); ASSERT_EQ(deref->deref_type, nir_deref_type_cast); @@ -1266,8 +1266,8 @@ TEST_F(nir_load_store_vectorize_test, shared_load_adjacent_indirect_sub) ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1); nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_deref, 0); - ASSERT_EQ(load->dest.ssa.bit_size, 32); - ASSERT_EQ(load->dest.ssa.num_components, 2); + ASSERT_EQ(load->def.bit_size, 32); + ASSERT_EQ(load->def.num_components, 2); deref = nir_src_as_deref(load->src[0]); ASSERT_EQ(deref->deref_type, nir_deref_type_cast); @@ -1303,8 +1303,8 @@ TEST_F(nir_load_store_vectorize_test, shared_load_struct) ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1); nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_deref, 0); - ASSERT_EQ(load->dest.ssa.bit_size, 32); - ASSERT_EQ(load->dest.ssa.num_components, 2); + ASSERT_EQ(load->def.bit_size, 32); + 
ASSERT_EQ(load->def.num_components, 2); deref = nir_src_as_deref(load->src[0]); ASSERT_EQ(deref->deref_type, nir_deref_type_cast); @@ -1340,8 +1340,8 @@ TEST_F(nir_load_store_vectorize_test, shared_load_identical_store_adjacent) ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 1); nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_deref, 0); - ASSERT_EQ(load->dest.ssa.bit_size, 32); - ASSERT_EQ(load->dest.ssa.num_components, 1); + ASSERT_EQ(load->def.bit_size, 32); + ASSERT_EQ(load->def.num_components, 1); deref = nir_src_as_deref(load->src[0]); ASSERT_EQ(deref->deref_type, nir_deref_type_array); @@ -1391,8 +1391,8 @@ TEST_F(nir_load_store_vectorize_test, shared_load_adjacent_store_identical) ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 1); nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_deref, 0); - ASSERT_EQ(load->dest.ssa.bit_size, 32); - ASSERT_EQ(load->dest.ssa.num_components, 2); + ASSERT_EQ(load->def.bit_size, 32); + ASSERT_EQ(load->def.num_components, 2); deref = nir_src_as_deref(load->src[0]); ASSERT_EQ(deref->deref_type, nir_deref_type_cast); @@ -1425,8 +1425,8 @@ TEST_F(nir_load_store_vectorize_test, shared_load_bool) ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1); nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_deref, 0); - ASSERT_EQ(load->dest.ssa.bit_size, 32); - ASSERT_EQ(load->dest.ssa.num_components, 2); + ASSERT_EQ(load->def.bit_size, 32); + ASSERT_EQ(load->def.num_components, 2); deref = nir_src_as_deref(load->src[0]); ASSERT_EQ(deref->deref_type, nir_deref_type_cast); @@ -1442,8 +1442,8 @@ TEST_F(nir_load_store_vectorize_test, shared_load_bool) /* The loaded value is converted to Boolean by (loaded != 0). */ ASSERT_TRUE(test_alu(loads[0x1]->src.ssa->parent_instr, nir_op_ine)); ASSERT_TRUE(test_alu(loads[0x2]->src.ssa->parent_instr, nir_op_ine)); - ASSERT_TRUE(test_alu_def(loads[0x1]->src.ssa->parent_instr, 0, &load->dest.ssa, 0)); - ASSERT_TRUE(test_alu_def(loads[0x2]->src.ssa->parent_instr, 0, &load->dest.ssa, 1)); + ASSERT_TRUE(test_alu_def(loads[0x1]->src.ssa->parent_instr, 0, &load->def, 0)); + ASSERT_TRUE(test_alu_def(loads[0x2]->src.ssa->parent_instr, 0, &load->def, 1)); } TEST_F(nir_load_store_vectorize_test, shared_load_bool_mixed) @@ -1465,8 +1465,8 @@ TEST_F(nir_load_store_vectorize_test, shared_load_bool_mixed) ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1); nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_deref, 0); - ASSERT_EQ(load->dest.ssa.bit_size, 32); - ASSERT_EQ(load->dest.ssa.num_components, 2); + ASSERT_EQ(load->def.bit_size, 32); + ASSERT_EQ(load->def.num_components, 2); deref = nir_src_as_deref(load->src[0]); ASSERT_EQ(deref->deref_type, nir_deref_type_cast); @@ -1481,7 +1481,7 @@ TEST_F(nir_load_store_vectorize_test, shared_load_bool_mixed) /* The loaded value is converted to Boolean by (loaded != 0). 
*/ ASSERT_TRUE(test_alu(loads[0x1]->src.ssa->parent_instr, nir_op_ine)); - ASSERT_TRUE(test_alu_def(loads[0x1]->src.ssa->parent_instr, 0, &load->dest.ssa, 0)); + ASSERT_TRUE(test_alu_def(loads[0x1]->src.ssa->parent_instr, 0, &load->def, 0)); EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y"); } @@ -1595,8 +1595,8 @@ TEST_F(nir_load_store_vectorize_test, push_const_load_adjacent_complex_indirect) ASSERT_EQ(count_intrinsics(nir_intrinsic_load_push_constant), 1); nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_push_constant, 0); - ASSERT_EQ(load->dest.ssa.bit_size, 32); - ASSERT_EQ(load->dest.ssa.num_components, 2); + ASSERT_EQ(load->def.bit_size, 32); + ASSERT_EQ(load->def.num_components, 2); ASSERT_EQ(load->src[0].ssa, low); EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x"); EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y"); @@ -1650,8 +1650,8 @@ TEST_F(nir_load_store_vectorize_test, DISABLED_ssbo_alias2) ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 1); nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ssbo, 0); - ASSERT_EQ(load->dest.ssa.bit_size, 32); - ASSERT_EQ(load->dest.ssa.num_components, 1); + ASSERT_EQ(load->def.bit_size, 32); + ASSERT_EQ(load->def.num_components, 1); ASSERT_EQ(load->src[1].ssa, offset); EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x"); EXPECT_INSTR_SWIZZLES(movs[0x3], load, "x"); @@ -1694,8 +1694,8 @@ TEST_F(nir_load_store_vectorize_test, DISABLED_ssbo_alias4) ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 1); nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ssbo, 0); - ASSERT_EQ(load->dest.ssa.bit_size, 32); - ASSERT_EQ(load->dest.ssa.num_components, 1); + ASSERT_EQ(load->def.bit_size, 32); + ASSERT_EQ(load->def.num_components, 1); ASSERT_EQ(load->src[1].ssa, offset); EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x"); EXPECT_INSTR_SWIZZLES(movs[0x3], load, "x"); @@ -1729,8 +1729,8 @@ TEST_F(nir_load_store_vectorize_test, ssbo_alias6) ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 1); nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ssbo, 0); - ASSERT_EQ(load->dest.ssa.bit_size, 32); - ASSERT_EQ(load->dest.ssa.num_components, 1); + ASSERT_EQ(load->def.bit_size, 32); + ASSERT_EQ(load->def.num_components, 1); ASSERT_EQ(nir_src_as_uint(load->src[1]), 0); EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x"); EXPECT_INSTR_SWIZZLES(movs[0x3], load, "x"); @@ -1765,9 +1765,9 @@ TEST_F(nir_load_store_vectorize_test, DISABLED_shared_alias0) ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1); nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_deref, 0); - ASSERT_EQ(load->dest.ssa.bit_size, 32); - ASSERT_EQ(load->dest.ssa.num_components, 1); - ASSERT_EQ(load->src[0].ssa, &load_deref->dest.ssa); + ASSERT_EQ(load->def.bit_size, 32); + ASSERT_EQ(load->def.num_components, 1); + ASSERT_EQ(load->src[0].ssa, &load_deref->def); EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x"); EXPECT_INSTR_SWIZZLES(movs[0x3], load, "x"); } @@ -1790,9 +1790,9 @@ TEST_F(nir_load_store_vectorize_test, shared_alias1) ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1); nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_deref, 0); - ASSERT_EQ(load->dest.ssa.bit_size, 32); - ASSERT_EQ(load->dest.ssa.num_components, 1); - ASSERT_EQ(load->src[0].ssa, &load_deref->dest.ssa); + ASSERT_EQ(load->def.bit_size, 32); + ASSERT_EQ(load->def.num_components, 1); + ASSERT_EQ(load->src[0].ssa, &load_deref->def); EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x"); EXPECT_INSTR_SWIZZLES(movs[0x3], load, "x"); } @@ -1885,14 +1885,14 @@ TEST_F(nir_load_store_vectorize_test, 
ssbo_offset_overflow_robust_indirect_strid ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 2); nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ssbo, 0); - ASSERT_EQ(load->dest.ssa.bit_size, 32); - ASSERT_EQ(load->dest.ssa.num_components, 1); + ASSERT_EQ(load->def.bit_size, 32); + ASSERT_EQ(load->def.num_components, 1); ASSERT_EQ(load->src[1].ssa, offset); EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x"); load = get_intrinsic(nir_intrinsic_load_ssbo, 1); - ASSERT_EQ(load->dest.ssa.bit_size, 32); - ASSERT_EQ(load->dest.ssa.num_components, 2); + ASSERT_EQ(load->def.bit_size, 32); + ASSERT_EQ(load->def.num_components, 2); ASSERT_EQ(load->src[1].ssa, offset_4); EXPECT_INSTR_SWIZZLES(movs[0x2], load, "x"); EXPECT_INSTR_SWIZZLES(movs[0x3], load, "y"); diff --git a/src/compiler/nir/tests/loop_analyze_tests.cpp b/src/compiler/nir/tests/loop_analyze_tests.cpp index a66f72a..6cd7681 100644 --- a/src/compiler/nir/tests/loop_analyze_tests.cpp +++ b/src/compiler/nir/tests/loop_analyze_tests.cpp @@ -83,13 +83,13 @@ loop_builder(nir_builder *b, loop_builder_param p) nir_loop *loop = nir_push_loop(b); { - nir_def_init(&phi->instr, &phi->dest.ssa, ssa_0->num_components, + nir_def_init(&phi->instr, &phi->def, ssa_0->num_components, ssa_0->bit_size); nir_phi_instr_add_src(phi, ssa_0->parent_instr->block, nir_src_for_ssa(ssa_0)); - nir_def *ssa_5 = &phi->dest.ssa; + nir_def *ssa_5 = &phi->def; nir_def *ssa_3 = p.cond_instr(b, ssa_5, ssa_1); nir_if *nif = nir_push_if(b, ssa_3); @@ -153,13 +153,13 @@ loop_builder_invert(nir_builder *b, loop_builder_invert_param p) nir_loop *loop = nir_push_loop(b); { - nir_def_init(&phi->instr, &phi->dest.ssa, ssa_0->num_components, + nir_def_init(&phi->instr, &phi->def, ssa_0->num_components, ssa_0->bit_size); nir_phi_instr_add_src(phi, ssa_0->parent_instr->block, nir_src_for_ssa(ssa_0)); - nir_def *ssa_5 = &phi->dest.ssa; + nir_def *ssa_5 = &phi->def; nir_def *ssa_3 = p.incr_instr(b, ssa_5, ssa_1); diff --git a/src/compiler/nir/tests/loop_unroll_tests.cpp b/src/compiler/nir/tests/loop_unroll_tests.cpp index 80580c8..903bdca 100644 --- a/src/compiler/nir/tests/loop_unroll_tests.cpp +++ b/src/compiler/nir/tests/loop_unroll_tests.cpp @@ -116,19 +116,19 @@ loop_unroll_test_helper(nir_builder *bld, nir_def *init, nir_block *head_block = nir_loop_first_block(loop); nir_phi_instr *phi = nir_phi_instr_create(bld->shader); - nir_def_init(&phi->instr, &phi->dest.ssa, 1, 32); + nir_def_init(&phi->instr, &phi->def, 1, 32); nir_phi_instr_add_src(phi, top_block, nir_src_for_ssa(init)); nir_def *cond = cond_instr(bld, - (reverse ? limit : &phi->dest.ssa), - (reverse ? &phi->dest.ssa : limit)); + (reverse ? limit : &phi->def), + (reverse ? 
&phi->def : limit)); nir_if *nif = nir_push_if(bld, cond); nir_jump(bld, nir_jump_break); nir_pop_if(bld, nif); - nir_def *var = incr_instr(bld, &phi->dest.ssa, step); + nir_def *var = incr_instr(bld, &phi->def, step); nir_phi_instr_add_src(phi, nir_cursor_current_block(bld->cursor), nir_src_for_ssa(var)); diff --git a/src/compiler/nir/tests/lower_alu_width_tests.cpp b/src/compiler/nir/tests/lower_alu_width_tests.cpp index b5bb5d0..f9482ba 100644 --- a/src/compiler/nir/tests/lower_alu_width_tests.cpp +++ b/src/compiler/nir/tests/lower_alu_width_tests.cpp @@ -44,7 +44,7 @@ TEST_F(nir_lower_alu_width_test, fdot_order) b, nir_imm_vec3(b, 1.7014118346046923e+38, 1.7014118346046923e+38, 8.507059173023462e+37), nir_imm_vec3(b, -0.5, 1.5, 1.0)); nir_intrinsic_instr *store = - nir_build_store_deref(b, &nir_build_deref_var(b, res_var)->dest.ssa, val); + nir_build_store_deref(b, &nir_build_deref_var(b, res_var)->def, val); nir_lower_alu_width(b->shader, NULL, NULL); nir_opt_constant_folding(b->shader); diff --git a/src/compiler/nir/tests/opt_if_tests.cpp b/src/compiler/nir/tests/opt_if_tests.cpp index 028915f..8ba0091 100644 --- a/src/compiler/nir/tests/opt_if_tests.cpp +++ b/src/compiler/nir/tests/opt_if_tests.cpp @@ -113,7 +113,7 @@ TEST_F(nir_opt_if_test, opt_if_simplification_single_source_phi_after_if) nir_phi_instr_add_src(phi, then_block, nir_src_for_ssa(one)); - nir_def_init(&phi->instr, &phi->dest.ssa, + nir_def_init(&phi->instr, &phi->def, one->num_components, one->bit_size); nir_builder_instr_insert(b, &phi->instr); @@ -135,14 +135,14 @@ TEST_F(nir_opt_if_test, opt_if_alu_of_phi_progress) nir_loop *loop = nir_push_loop(b); { - nir_def_init(&phi->instr, &phi->dest.ssa, + nir_def_init(&phi->instr, &phi->def, x->num_components, x->bit_size); nir_phi_instr_add_src(phi, x->parent_instr->block, nir_src_for_ssa(x)); - nir_def *y = nir_iadd(b, &phi->dest.ssa, two); + nir_def *y = nir_iadd(b, &phi->def, two); nir_store_var(b, out_var, - nir_imul(b, &phi->dest.ssa, two), 1); + nir_imul(b, &phi->def, two), 1); nir_phi_instr_add_src(phi, nir_cursor_current_block(b->cursor), nir_src_for_ssa(y)); } diff --git a/src/compiler/nir/tests/opt_shrink_vectors_tests.cpp b/src/compiler/nir/tests/opt_shrink_vectors_tests.cpp index 0fbbb05..aacbb3f 100644 --- a/src/compiler/nir/tests/opt_shrink_vectors_tests.cpp +++ b/src/compiler/nir/tests/opt_shrink_vectors_tests.cpp @@ -276,11 +276,11 @@ TEST_F(nir_opt_shrink_vectors_test, opt_shrink_phis_loop_simple) nir_def *loop_max = nir_imm_float(b, 3.0); nir_phi_instr *const phi = nir_phi_instr_create(b->shader); - nir_def *phi_def = &phi->dest.ssa; + nir_def *phi_def = &phi->def; nir_loop *loop = nir_push_loop(b); - nir_def_init(&phi->instr, &phi->dest.ssa, v->num_components, v->bit_size); + nir_def_init(&phi->instr, &phi->def, v->num_components, v->bit_size); nir_phi_instr_add_src(phi, v->parent_instr->block, nir_src_for_ssa(v)); @@ -382,11 +382,11 @@ TEST_F(nir_opt_shrink_vectors_test, opt_shrink_phis_loop_swizzle) nir_def *loop_max = nir_imm_float(b, 3.0); nir_phi_instr *const phi = nir_phi_instr_create(b->shader); - nir_def *phi_def = &phi->dest.ssa; + nir_def *phi_def = &phi->def; nir_loop *loop = nir_push_loop(b); - nir_def_init(&phi->instr, &phi->dest.ssa, v->num_components, v->bit_size); + nir_def_init(&phi->instr, &phi->def, v->num_components, v->bit_size); nir_phi_instr_add_src(phi, v->parent_instr->block, nir_src_for_ssa(v)); @@ -489,11 +489,11 @@ TEST_F(nir_opt_shrink_vectors_test, opt_shrink_phis_loop_phi_out) nir_def *loop_max = nir_imm_float(b, 3.0); 
nir_phi_instr *const phi = nir_phi_instr_create(b->shader); - nir_def *phi_def = &phi->dest.ssa; + nir_def *phi_def = &phi->def; nir_loop *loop = nir_push_loop(b); - nir_def_init(&phi->instr, &phi->dest.ssa, v->num_components, v->bit_size); + nir_def_init(&phi->instr, &phi->def, v->num_components, v->bit_size); nir_phi_instr_add_src(phi, v->parent_instr->block, nir_src_for_ssa(v)); diff --git a/src/compiler/nir/tests/range_analysis_tests.cpp b/src/compiler/nir/tests/range_analysis_tests.cpp index fbf419a..a2c756e 100644 --- a/src/compiler/nir/tests/range_analysis_tests.cpp +++ b/src/compiler/nir/tests/range_analysis_tests.cpp @@ -273,10 +273,10 @@ TEST_F(unsigned_upper_bound_test, loop_phi_bcsel) nir_def *cond = nir_imm_false(b); nir_phi_instr *const phi = nir_phi_instr_create(b->shader); - nir_def_init(&phi->instr, &phi->dest.ssa, 1, 32); + nir_def_init(&phi->instr, &phi->def, 1, 32); nir_push_loop(b); - nir_def *sel = nir_bcsel(b, cond, &phi->dest.ssa, two); + nir_def *sel = nir_bcsel(b, cond, &phi->def, two); nir_pop_loop(b, NULL); nir_phi_instr_add_src(phi, zero->parent_instr->block, @@ -289,7 +289,7 @@ TEST_F(unsigned_upper_bound_test, loop_phi_bcsel) nir_validate_shader(b->shader, NULL); struct hash_table *range_ht = _mesa_pointer_hash_table_create(NULL); - nir_scalar scalar = nir_get_ssa_scalar(&phi->dest.ssa, 0); + nir_scalar scalar = nir_get_ssa_scalar(&phi->def, 0); EXPECT_EQ(nir_unsigned_upper_bound(b->shader, range_ht, scalar, NULL), 2); _mesa_hash_table_destroy(range_ht, NULL); } diff --git a/src/compiler/nir/tests/vars_tests.cpp b/src/compiler/nir/tests/vars_tests.cpp index 02b1d37..341b87a 100644 --- a/src/compiler/nir/tests/vars_tests.cpp +++ b/src/compiler/nir/tests/vars_tests.cpp @@ -2163,7 +2163,7 @@ TEST_F(nir_split_vars_test, simple_dont_split) nir_deref_instr *temp_deref = nir_build_deref_var(b, temp); for (int i = 0; i < 4; i++) - nir_store_deref(b, nir_build_deref_array(b, temp_deref, &ind_deref->dest.ssa), nir_load_var(b, in[i]), 1); + nir_store_deref(b, nir_build_deref_array(b, temp_deref, &ind_deref->def), nir_load_var(b, in[i]), 1); nir_validate_shader(b->shader, NULL); ASSERT_EQ(count_derefs(nir_deref_type_array), 4); @@ -2188,7 +2188,7 @@ TEST_F(nir_split_vars_test, twolevel_dont_split_lvl_0) nir_deref_instr *temp_deref = nir_build_deref_var(b, temp); for (int i = 0; i < 4; i++) { - nir_deref_instr *level0 = nir_build_deref_array(b, temp_deref, &ind_deref->dest.ssa); + nir_deref_instr *level0 = nir_build_deref_array(b, temp_deref, &ind_deref->def); for (int j = 0; j < 6; j++) { nir_deref_instr *level1 = nir_build_deref_array_imm(b, level0, j); nir_store_deref(b, level1, nir_load_var(b, in[i]), 1); @@ -2221,7 +2221,7 @@ TEST_F(nir_split_vars_test, twolevel_dont_split_lvl_1) nir_deref_instr *level0 = nir_build_deref_array_imm(b, temp_deref, i); for (int j = 0; j < 6; j++) { /* just add the inner index to get some different derefs */ - nir_deref_instr *level1 = nir_build_deref_array(b, level0, nir_iadd_imm(b, &ind_deref->dest.ssa, j)); + nir_deref_instr *level1 = nir_build_deref_array(b, level0, nir_iadd_imm(b, &ind_deref->def, j)); nir_store_deref(b, level1, nir_load_var(b, in[i]), 1); } } diff --git a/src/compiler/spirv/spirv_to_nir.c b/src/compiler/spirv/spirv_to_nir.c index bc06999..4e5b06c 100644 --- a/src/compiler/spirv/spirv_to_nir.c +++ b/src/compiler/spirv/spirv_to_nir.c @@ -443,7 +443,7 @@ vtn_push_image(struct vtn_builder *b, uint32_t value_id, { struct vtn_type *type = vtn_get_value_type(b, value_id); vtn_assert(type->base_type == 
vtn_base_type_image); - struct vtn_value *value = vtn_push_nir_ssa(b, value_id, &deref->dest.ssa); + struct vtn_value *value = vtn_push_nir_ssa(b, value_id, &deref->def); value->propagated_non_uniform = propagate_non_uniform; } @@ -460,7 +460,7 @@ nir_def * vtn_sampled_image_to_nir_ssa(struct vtn_builder *b, struct vtn_sampled_image si) { - return nir_vec2(&b->nb, &si.image->dest.ssa, &si.sampler->dest.ssa); + return nir_vec2(&b->nb, &si.image->def, &si.sampler->def); } static void @@ -2956,7 +2956,7 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode, nir_tex_src srcs[10]; /* 10 should be enough */ nir_tex_src *p = srcs; - p->src = nir_src_for_ssa(&image->dest.ssa); + p->src = nir_src_for_ssa(&image->def); p->src_type = nir_tex_src_texture_deref; p++; @@ -2970,7 +2970,7 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode, vtn_fail_if(sampler == NULL, "%s requires an image of type OpTypeSampledImage", spirv_op_to_string(opcode)); - p->src = nir_src_for_ssa(&sampler->dest.ssa); + p->src = nir_src_for_ssa(&sampler->def); p->src_type = nir_tex_src_sampler_deref; p++; break; @@ -3295,7 +3295,7 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode, instr->dest_type = dest_type; - nir_def_init(&instr->instr, &instr->dest.ssa, + nir_def_init(&instr->instr, &instr->def, nir_tex_instr_dest_size(instr), 32); vtn_assert(glsl_get_vector_elements(ret_type->type) == @@ -3336,12 +3336,12 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode, if (is_sparse) { struct vtn_ssa_value *dest = vtn_create_ssa_value(b, struct_type->type); unsigned result_size = glsl_get_vector_elements(ret_type->type); - dest->elems[0]->def = nir_channel(&b->nb, &instr->dest.ssa, result_size); - dest->elems[1]->def = nir_trim_vector(&b->nb, &instr->dest.ssa, + dest->elems[0]->def = nir_channel(&b->nb, &instr->def, result_size); + dest->elems[1]->def = nir_trim_vector(&b->nb, &instr->def, result_size); vtn_push_ssa_value(b, w[2], dest); } else { - vtn_push_nir_ssa(b, w[2], &instr->dest.ssa); + vtn_push_nir_ssa(b, w[2], &instr->def); } } @@ -3641,7 +3641,7 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode, if (nir_intrinsic_has_atomic_op(intrin)) nir_intrinsic_set_atomic_op(intrin, translate_atomic_op(opcode)); - intrin->src[0] = nir_src_for_ssa(&image.image->dest.ssa); + intrin->src[0] = nir_src_for_ssa(&image.image->def); nir_intrinsic_set_image_dim(intrin, glsl_get_sampler_dim(image.image->type)); nir_intrinsic_set_image_array(intrin, glsl_sampler_type_is_array(image.image->type)); @@ -3776,12 +3776,12 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode, opcode == SpvOpImageQuerySizeLod) bit_size = MIN2(bit_size, 32); - nir_def_init(&intrin->instr, &intrin->dest.ssa, + nir_def_init(&intrin->instr, &intrin->def, nir_intrinsic_dest_components(intrin), bit_size); nir_builder_instr_insert(&b->nb, &intrin->instr); - nir_def *result = nir_trim_vector(&b->nb, &intrin->dest.ssa, + nir_def *result = nir_trim_vector(&b->nb, &intrin->def, dest_components); if (opcode == SpvOpImageQuerySize || @@ -3792,7 +3792,7 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode, struct vtn_ssa_value *dest = vtn_create_ssa_value(b, struct_type->type); unsigned res_type_size = glsl_get_vector_elements(type->type); dest->elems[0]->def = nir_channel(&b->nb, result, res_type_size); - if (intrin->dest.ssa.bit_size != 32) + if (intrin->def.bit_size != 32) dest->elems[0]->def = nir_u2u32(&b->nb, dest->elems[0]->def); dest->elems[1]->def = nir_trim_vector(&b->nb, result, res_type_size); vtn_push_ssa_value(b, w[2], dest); @@ 
-3933,7 +3933,7 @@ vtn_handle_atomics(struct vtn_builder *b, SpvOp opcode, nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr); nir_intrinsic_op op = get_uniform_nir_atomic_op(b, opcode); atomic = nir_intrinsic_instr_create(b->nb.shader, op); - atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa); + atomic->src[0] = nir_src_for_ssa(&deref->def); /* SSBO needs to initialize index/offset. In this case we don't need to, * as that info is already stored on the ptr->var->var nir_variable (see @@ -3970,7 +3970,7 @@ vtn_handle_atomics(struct vtn_builder *b, SpvOp opcode, const struct glsl_type *deref_type = deref->type; nir_intrinsic_op op = get_deref_nir_atomic_op(b, opcode); atomic = nir_intrinsic_instr_create(b->nb.shader, op); - atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa); + atomic->src[0] = nir_src_for_ssa(&deref->def); if (nir_intrinsic_has_atomic_op(atomic)) nir_intrinsic_set_atomic_op(atomic, translate_atomic_op(opcode)); @@ -4042,20 +4042,20 @@ vtn_handle_atomics(struct vtn_builder *b, SpvOp opcode, if (opcode == SpvOpAtomicFlagTestAndSet) { /* map atomic flag to a 32-bit atomic integer. */ - nir_def_init(&atomic->instr, &atomic->dest.ssa, 1, 32); + nir_def_init(&atomic->instr, &atomic->def, 1, 32); } else { - nir_def_init(&atomic->instr, &atomic->dest.ssa, + nir_def_init(&atomic->instr, &atomic->def, glsl_get_vector_elements(type->type), glsl_get_bit_size(type->type)); - vtn_push_nir_ssa(b, w[2], &atomic->dest.ssa); + vtn_push_nir_ssa(b, w[2], &atomic->def); } } nir_builder_instr_insert(&b->nb, &atomic->instr); if (opcode == SpvOpAtomicFlagTestAndSet) { - vtn_push_nir_ssa(b, w[2], nir_i2b(&b->nb, &atomic->dest.ssa)); + vtn_push_nir_ssa(b, w[2], nir_i2b(&b->nb, &atomic->def)); } if (after_semantics) vtn_emit_memory_barrier(b, scope, after_semantics); @@ -5780,7 +5780,7 @@ vtn_handle_ray_intrinsic(struct vtn_builder *b, SpvOp opcode, payload = vtn_get_call_payload_for_location(b, w[11]); else payload = vtn_nir_deref(b, w[11]); - intrin->src[10] = nir_src_for_ssa(&payload->dest.ssa); + intrin->src[10] = nir_src_for_ssa(&payload->def); nir_builder_instr_insert(&b->nb, &intrin->instr); break; } @@ -5790,9 +5790,9 @@ vtn_handle_ray_intrinsic(struct vtn_builder *b, SpvOp opcode, nir_intrinsic_report_ray_intersection); intrin->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[3])->def); intrin->src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def); - nir_def_init(&intrin->instr, &intrin->dest.ssa, 1, 1); + nir_def_init(&intrin->instr, &intrin->def, 1, 1); nir_builder_instr_insert(&b->nb, &intrin->instr); - vtn_push_nir_ssa(b, w[2], &intrin->dest.ssa); + vtn_push_nir_ssa(b, w[2], &intrin->def); break; } @@ -5818,7 +5818,7 @@ vtn_handle_ray_intrinsic(struct vtn_builder *b, SpvOp opcode, payload = vtn_get_call_payload_for_location(b, w[2]); else payload = vtn_nir_deref(b, w[2]); - intrin->src[1] = nir_src_for_ssa(&payload->dest.ssa); + intrin->src[1] = nir_src_for_ssa(&payload->def); nir_builder_instr_insert(&b->nb, &intrin->instr); break; } @@ -6684,11 +6684,11 @@ vtn_emit_kernel_entry_point_wrapper(struct vtn_builder *b, nir_local_variable_create(impl, in_var->type, "copy_in"); nir_copy_var(&b->nb, copy_var, in_var); call->params[i] = - nir_src_for_ssa(&nir_build_deref_var(&b->nb, copy_var)->dest.ssa); + nir_src_for_ssa(&nir_build_deref_var(&b->nb, copy_var)->def); } else if (param_type->base_type == vtn_base_type_image || param_type->base_type == vtn_base_type_sampler) { /* Don't load the var, just pass a deref of it */ - call->params[i] = nir_src_for_ssa(&nir_build_deref_var(&b->nb, 
in_var)->dest.ssa); + call->params[i] = nir_src_for_ssa(&nir_build_deref_var(&b->nb, in_var)->def); } else { call->params[i] = nir_src_for_ssa(nir_load_var(&b->nb, in_var)); } diff --git a/src/compiler/spirv/vtn_amd.c b/src/compiler/spirv/vtn_amd.c index c1bba1b..9cc2cb6 100644 --- a/src/compiler/spirv/vtn_amd.c +++ b/src/compiler/spirv/vtn_amd.c @@ -84,9 +84,9 @@ vtn_handle_amd_shader_ballot_instruction(struct vtn_builder *b, SpvOp ext_opcode const struct glsl_type *dest_type = vtn_get_type(b, w[1])->type; nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->nb.shader, op); - nir_def_init_for_type(&intrin->instr, &intrin->dest.ssa, dest_type); + nir_def_init_for_type(&intrin->instr, &intrin->def, dest_type); if (nir_intrinsic_infos[op].src_components[0] == 0) - intrin->num_components = intrin->dest.ssa.num_components; + intrin->num_components = intrin->def.num_components; for (unsigned i = 0; i < num_args; i++) intrin->src[i] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[i + 5])); @@ -113,7 +113,7 @@ vtn_handle_amd_shader_ballot_instruction(struct vtn_builder *b, SpvOp ext_opcode } nir_builder_instr_insert(&b->nb, &intrin->instr); - vtn_push_nir_ssa(b, w[2], &intrin->dest.ssa); + vtn_push_nir_ssa(b, w[2], &intrin->def); return true; } @@ -212,11 +212,11 @@ vtn_handle_amd_shader_explicit_vertex_parameter_instruction(struct vtn_builder * vec_deref = deref; deref = nir_deref_instr_parent(deref); } - intrin->src[0] = nir_src_for_ssa(&deref->dest.ssa); + intrin->src[0] = nir_src_for_ssa(&deref->def); intrin->src[1] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[6])); intrin->num_components = glsl_get_vector_elements(deref->type); - nir_def_init(&intrin->instr, &intrin->dest.ssa, + nir_def_init(&intrin->instr, &intrin->def, glsl_get_vector_elements(deref->type), glsl_get_bit_size(deref->type)); @@ -225,10 +225,10 @@ vtn_handle_amd_shader_explicit_vertex_parameter_instruction(struct vtn_builder * nir_def *def; if (vec_array_deref) { assert(vec_deref); - def = nir_vector_extract(&b->nb, &intrin->dest.ssa, + def = nir_vector_extract(&b->nb, &intrin->def, vec_deref->arr.index.ssa); } else { - def = &intrin->dest.ssa; + def = &intrin->def; } vtn_push_nir_ssa(b, w[2], def); diff --git a/src/compiler/spirv/vtn_cfg.c b/src/compiler/spirv/vtn_cfg.c index 9ce95f7..541fbc2 100644 --- a/src/compiler/spirv/vtn_cfg.c +++ b/src/compiler/spirv/vtn_cfg.c @@ -124,7 +124,7 @@ vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode, glsl_get_bare_type(ret_type->type), "return_tmp"); ret_deref = nir_build_deref_var(&b->nb, ret_tmp); - call->params[param_idx++] = nir_src_for_ssa(&ret_deref->dest.ssa); + call->params[param_idx++] = nir_src_for_ssa(&ret_deref->def); } for (unsigned i = 0; i < vtn_callee->type->length; i++) { diff --git a/src/compiler/spirv/vtn_glsl450.c b/src/compiler/spirv/vtn_glsl450.c index e5ccab3..c7d9002 100644 --- a/src/compiler/spirv/vtn_glsl450.c +++ b/src/compiler/spirv/vtn_glsl450.c @@ -666,7 +666,7 @@ handle_glsl450_interpolation(struct vtn_builder *b, enum GLSLstd450 opcode, vec_deref = deref; deref = nir_deref_instr_parent(deref); } - intrin->src[0] = nir_src_for_ssa(&deref->dest.ssa); + intrin->src[0] = nir_src_for_ssa(&deref->def); switch (opcode) { case GLSLstd450InterpolateAtCentroid: @@ -680,13 +680,13 @@ handle_glsl450_interpolation(struct vtn_builder *b, enum GLSLstd450 opcode, } intrin->num_components = glsl_get_vector_elements(deref->type); - nir_def_init(&intrin->instr, &intrin->dest.ssa, + nir_def_init(&intrin->instr, &intrin->def, glsl_get_vector_elements(deref->type), 
glsl_get_bit_size(deref->type)); nir_builder_instr_insert(&b->nb, &intrin->instr); - nir_def *def = &intrin->dest.ssa; + nir_def *def = &intrin->def; if (vec_array_deref) def = nir_vector_extract(&b->nb, def, vec_deref->arr.index.ssa); diff --git a/src/compiler/spirv/vtn_opencl.c b/src/compiler/spirv/vtn_opencl.c index 494f687..7e89d40 100644 --- a/src/compiler/spirv/vtn_opencl.c +++ b/src/compiler/spirv/vtn_opencl.c @@ -186,7 +186,7 @@ static bool call_mangled_function(struct vtn_builder *b, glsl_get_bare_type(dest_type->type), "return_tmp"); ret_deref = nir_build_deref_var(&b->nb, ret_tmp); - call->params[param_idx++] = nir_src_for_ssa(&ret_deref->dest.ssa); + call->params[param_idx++] = nir_src_for_ssa(&ret_deref->def); } for (unsigned i = 0; i < num_srcs; i++) @@ -830,7 +830,7 @@ handle_printf(struct vtn_builder *b, uint32_t opcode, /* Lastly, the actual intrinsic */ nir_def *fmt_idx = nir_imm_int(&b->nb, info_idx); - nir_def *ret = nir_printf(&b->nb, fmt_idx, &deref_var->dest.ssa); + nir_def *ret = nir_printf(&b->nb, fmt_idx, &deref_var->def); vtn_push_nir_ssa(b, w_dest[1], ret); } diff --git a/src/compiler/spirv/vtn_subgroup.c b/src/compiler/spirv/vtn_subgroup.c index b3a7eb4..ea079c7 100644 --- a/src/compiler/spirv/vtn_subgroup.c +++ b/src/compiler/spirv/vtn_subgroup.c @@ -52,8 +52,8 @@ vtn_build_subgroup_instr(struct vtn_builder *b, nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->nb.shader, nir_op); - nir_def_init_for_type(&intrin->instr, &intrin->dest.ssa, dst->type); - intrin->num_components = intrin->dest.ssa.num_components; + nir_def_init_for_type(&intrin->instr, &intrin->def, dst->type); + intrin->num_components = intrin->def.num_components; intrin->src[0] = nir_src_for_ssa(src0->def); if (index) @@ -64,7 +64,7 @@ vtn_build_subgroup_instr(struct vtn_builder *b, nir_builder_instr_insert(&b->nb, &intrin->instr); - dst->def = &intrin->dest.ssa; + dst->def = &intrin->def; return dst; } @@ -81,9 +81,9 @@ vtn_handle_subgroup(struct vtn_builder *b, SpvOp opcode, "OpGroupNonUniformElect must return a Bool"); nir_intrinsic_instr *elect = nir_intrinsic_instr_create(b->nb.shader, nir_intrinsic_elect); - nir_def_init_for_type(&elect->instr, &elect->dest.ssa, dest_type->type); + nir_def_init_for_type(&elect->instr, &elect->def, dest_type->type); nir_builder_instr_insert(&b->nb, &elect->instr); - vtn_push_nir_ssa(b, w[2], &elect->dest.ssa); + vtn_push_nir_ssa(b, w[2], &elect->def); break; } @@ -95,10 +95,10 @@ vtn_handle_subgroup(struct vtn_builder *b, SpvOp opcode, nir_intrinsic_instr *ballot = nir_intrinsic_instr_create(b->nb.shader, nir_intrinsic_ballot); ballot->src[0] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[3 + has_scope])); - nir_def_init(&ballot->instr, &ballot->dest.ssa, 4, 32); + nir_def_init(&ballot->instr, &ballot->def, 4, 32); ballot->num_components = 4; nir_builder_instr_insert(&b->nb, &ballot->instr); - vtn_push_nir_ssa(b, w[2], &ballot->dest.ssa); + vtn_push_nir_ssa(b, w[2], &ballot->def); break; } @@ -114,11 +114,11 @@ vtn_handle_subgroup(struct vtn_builder *b, SpvOp opcode, intrin->src[0] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[4])); intrin->src[1] = nir_src_for_ssa(nir_load_subgroup_invocation(&b->nb)); - nir_def_init_for_type(&intrin->instr, &intrin->dest.ssa, + nir_def_init_for_type(&intrin->instr, &intrin->def, dest_type->type); nir_builder_instr_insert(&b->nb, &intrin->instr); - vtn_push_nir_ssa(b, w[2], &intrin->dest.ssa); + vtn_push_nir_ssa(b, w[2], &intrin->def); break; } @@ -169,11 +169,11 @@ vtn_handle_subgroup(struct vtn_builder *b, SpvOp opcode, if 
(src1) intrin->src[1] = nir_src_for_ssa(src1); - nir_def_init_for_type(&intrin->instr, &intrin->dest.ssa, + nir_def_init_for_type(&intrin->instr, &intrin->def, dest_type->type); nir_builder_instr_insert(&b->nb, &intrin->instr); - vtn_push_nir_ssa(b, w[2], &intrin->dest.ssa); + vtn_push_nir_ssa(b, w[2], &intrin->def); break; } @@ -262,11 +262,11 @@ vtn_handle_subgroup(struct vtn_builder *b, SpvOp opcode, if (nir_intrinsic_infos[op].src_components[0] == 0) intrin->num_components = src0->num_components; intrin->src[0] = nir_src_for_ssa(src0); - nir_def_init_for_type(&intrin->instr, &intrin->dest.ssa, + nir_def_init_for_type(&intrin->instr, &intrin->def, dest_type->type); nir_builder_instr_insert(&b->nb, &intrin->instr); - vtn_push_nir_ssa(b, w[2], &intrin->dest.ssa); + vtn_push_nir_ssa(b, w[2], &intrin->def); break; } diff --git a/src/compiler/spirv/vtn_variables.c b/src/compiler/spirv/vtn_variables.c index 414b1b5..a18d5a1 100644 --- a/src/compiler/spirv/vtn_variables.c +++ b/src/compiler/spirv/vtn_variables.c @@ -249,13 +249,13 @@ vtn_variable_resource_index(struct vtn_builder *b, struct vtn_variable *var, nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, var->mode)); nir_address_format addr_format = vtn_mode_to_address_format(b, var->mode); - nir_def_init(&instr->instr, &instr->dest.ssa, + nir_def_init(&instr->instr, &instr->def, nir_address_format_num_components(addr_format), nir_address_format_bit_size(addr_format)); - instr->num_components = instr->dest.ssa.num_components; + instr->num_components = instr->def.num_components; nir_builder_instr_insert(&b->nb, &instr->instr); - return &instr->dest.ssa; + return &instr->def; } static nir_def * @@ -272,13 +272,13 @@ vtn_resource_reindex(struct vtn_builder *b, enum vtn_variable_mode mode, nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, mode)); nir_address_format addr_format = vtn_mode_to_address_format(b, mode); - nir_def_init(&instr->instr, &instr->dest.ssa, + nir_def_init(&instr->instr, &instr->def, nir_address_format_num_components(addr_format), nir_address_format_bit_size(addr_format)); - instr->num_components = instr->dest.ssa.num_components; + instr->num_components = instr->def.num_components; nir_builder_instr_insert(&b->nb, &instr->instr); - return &instr->dest.ssa; + return &instr->def; } static nir_def * @@ -294,13 +294,13 @@ vtn_descriptor_load(struct vtn_builder *b, enum vtn_variable_mode mode, nir_intrinsic_set_desc_type(desc_load, vk_desc_type_for_mode(b, mode)); nir_address_format addr_format = vtn_mode_to_address_format(b, mode); - nir_def_init(&desc_load->instr, &desc_load->dest.ssa, + nir_def_init(&desc_load->instr, &desc_load->def, nir_address_format_num_components(addr_format), nir_address_format_bit_size(addr_format)); - desc_load->num_components = desc_load->dest.ssa.num_components; + desc_load->num_components = desc_load->def.num_components; nir_builder_instr_insert(&b->nb, &desc_load->instr); - return &desc_load->dest.ssa; + return &desc_load->def; } static struct vtn_pointer * @@ -432,9 +432,9 @@ vtn_pointer_dereference(struct vtn_builder *b, assert(base->var && base->var->var); tail = nir_build_deref_var(&b->nb, base->var->var); if (base->ptr_type && base->ptr_type->type) { - tail->dest.ssa.num_components = + tail->def.num_components = glsl_get_vector_elements(base->ptr_type->type); - tail->dest.ssa.bit_size = glsl_get_bit_size(base->ptr_type->type); + tail->def.bit_size = glsl_get_bit_size(base->ptr_type->type); } } @@ -442,11 +442,11 @@ vtn_pointer_dereference(struct vtn_builder *b, /* We 
start with a deref cast to get the stride. Hopefully, we'll be * able to delete that cast eventually. */ - tail = nir_build_deref_cast(&b->nb, &tail->dest.ssa, tail->modes, + tail = nir_build_deref_cast(&b->nb, &tail->def, tail->modes, tail->type, base->ptr_type->stride); nir_def *index = vtn_access_link_as_ssa(b, deref_chain->link[0], 1, - tail->dest.ssa.bit_size); + tail->def.bit_size); tail = nir_build_deref_ptr_as_array(&b->nb, tail, index); idx++; } @@ -460,7 +460,7 @@ vtn_pointer_dereference(struct vtn_builder *b, } else { nir_def *arr_index = vtn_access_link_as_ssa(b, deref_chain->link[idx], 1, - tail->dest.ssa.bit_size); + tail->def.bit_size); tail = nir_build_deref_array(&b->nb, tail, arr_index); type = type->array_element; } @@ -1784,7 +1784,7 @@ vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr) return ptr->block_index; } else { - return &vtn_pointer_to_deref(b, ptr)->dest.ssa; + return &vtn_pointer_to_deref(b, ptr)->def; } } @@ -1833,9 +1833,9 @@ vtn_pointer_from_ssa(struct vtn_builder *b, nir_def *ssa, */ ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode, deref_type, ptr_type->stride); - ptr->deref->dest.ssa.num_components = + ptr->deref->def.num_components = glsl_get_vector_elements(ptr_type->type); - ptr->deref->dest.ssa.bit_size = glsl_get_bit_size(ptr_type->type); + ptr->deref->def.bit_size = glsl_get_bit_size(ptr_type->type); } return ptr; @@ -2773,9 +2773,9 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode, nir_address_format_bit_size(addr_format), nir_address_format_null_value(addr_format)); - nir_def *valid = nir_build_deref_mode_is(&b->nb, 1, &src_deref->dest.ssa, nir_mode); + nir_def *valid = nir_build_deref_mode_is(&b->nb, 1, &src_deref->def, nir_mode); vtn_push_nir_ssa(b, w[2], nir_bcsel(&b->nb, valid, - &src_deref->dest.ssa, + &src_deref->def, null_value)); break; } @@ -2797,13 +2797,13 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode, nir_deref_instr *src_deref = vtn_nir_deref(b, w[3]); nir_def *global_bit = - nir_bcsel(&b->nb, nir_build_deref_mode_is(&b->nb, 1, &src_deref->dest.ssa, + nir_bcsel(&b->nb, nir_build_deref_mode_is(&b->nb, 1, &src_deref->def, nir_var_mem_global), nir_imm_int(&b->nb, SpvMemorySemanticsCrossWorkgroupMemoryMask), nir_imm_int(&b->nb, 0)); nir_def *shared_bit = - nir_bcsel(&b->nb, nir_build_deref_mode_is(&b->nb, 1, &src_deref->dest.ssa, + nir_bcsel(&b->nb, nir_build_deref_mode_is(&b->nb, 1, &src_deref->def, nir_var_mem_shared), nir_imm_int(&b->nb, SpvMemorySemanticsWorkgroupMemoryMask), nir_imm_int(&b->nb, 0)); @@ -2819,12 +2819,12 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode, nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->nb.shader, nir_intrinsic_load_deref_block_intel); - load->src[0] = nir_src_for_ssa(&src->dest.ssa); - nir_def_init_for_type(&load->instr, &load->dest.ssa, res_type->type); - load->num_components = load->dest.ssa.num_components; + load->src[0] = nir_src_for_ssa(&src->def); + nir_def_init_for_type(&load->instr, &load->def, res_type->type); + load->num_components = load->def.num_components; nir_builder_instr_insert(&b->nb, &load->instr); - vtn_push_nir_ssa(b, w[2], &load->dest.ssa); + vtn_push_nir_ssa(b, w[2], &load->def); break; } @@ -2835,7 +2835,7 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode, nir_intrinsic_instr *store = nir_intrinsic_instr_create(b->nb.shader, nir_intrinsic_store_deref_block_intel); - store->src[0] = nir_src_for_ssa(&dest->dest.ssa); + store->src[0] = nir_src_for_ssa(&dest->def); store->src[1] = nir_src_for_ssa(data); 
store->num_components = data->num_components; nir_builder_instr_insert(&b->nb, &store->instr); diff --git a/src/freedreno/ir3/ir3_a6xx.c b/src/freedreno/ir3/ir3_a6xx.c index c5c3654..6ed32dc 100644 --- a/src/freedreno/ir3/ir3_a6xx.c +++ b/src/freedreno/ir3/ir3_a6xx.c @@ -52,7 +52,7 @@ emit_intrinsic_load_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr, ldib->dsts[0]->wrmask = MASK(intr->num_components); ldib->cat6.iim_val = intr->num_components; ldib->cat6.d = 1; - ldib->cat6.type = intr->dest.ssa.bit_size == 16 ? TYPE_U16 : TYPE_U32; + ldib->cat6.type = intr->def.bit_size == 16 ? TYPE_U16 : TYPE_U32; ldib->barrier_class = IR3_BARRIER_BUFFER_R; ldib->barrier_conflict = IR3_BARRIER_BUFFER_W; ir3_handle_bindless_cat6(ldib, intr->src[0]); @@ -352,7 +352,7 @@ emit_intrinsic_load_global_ir3(struct ir3_context *ctx, create_immed(b, 0), 0, create_immed(b, dest_components), 0); } - load->cat6.type = type_uint_size(intr->dest.ssa.bit_size); + load->cat6.type = type_uint_size(intr->def.bit_size); load->dsts[0]->wrmask = MASK(dest_components); load->barrier_class = IR3_BARRIER_BUFFER_R; diff --git a/src/freedreno/ir3/ir3_compiler_nir.c b/src/freedreno/ir3/ir3_compiler_nir.c index 395e268..8220e75 100644 --- a/src/freedreno/ir3/ir3_compiler_nir.c +++ b/src/freedreno/ir3/ir3_compiler_nir.c @@ -882,7 +882,7 @@ emit_intrinsic_load_ubo_ldc(struct ir3_context *ctx, nir_intrinsic_instr *intr, ldc->dsts[0]->wrmask = MASK(ncomp); ldc->cat6.iim_val = ncomp; ldc->cat6.d = nir_intrinsic_component(intr); - ldc->cat6.type = utype_def(&intr->dest.ssa); + ldc->cat6.type = utype_def(&intr->def); ir3_handle_bindless_cat6(ldc, intr->src[0]); if (ldc->flags & IR3_INSTR_B) @@ -1074,7 +1074,7 @@ emit_intrinsic_load_shared(struct ir3_context *ctx, nir_intrinsic_instr *intr, ldl = ir3_LDL(b, offset, 0, create_immed(b, base), 0, create_immed(b, intr->num_components), 0); - ldl->cat6.type = utype_def(&intr->dest.ssa); + ldl->cat6.type = utype_def(&intr->def); ldl->dsts[0]->wrmask = MASK(intr->num_components); ldl->barrier_class = IR3_BARRIER_SHARED_R; @@ -1131,7 +1131,7 @@ emit_intrinsic_load_shared_ir3(struct ir3_context *ctx, if (ctx->so->type == MESA_SHADER_TESS_CTRL && ctx->compiler->tess_use_shared) load->opc = OPC_LDL; - load->cat6.type = utype_def(&intr->dest.ssa); + load->cat6.type = utype_def(&intr->def); load->dsts[0]->wrmask = MASK(intr->num_components); load->barrier_class = IR3_BARRIER_SHARED_R; @@ -1282,7 +1282,7 @@ emit_intrinsic_load_scratch(struct ir3_context *ctx, nir_intrinsic_instr *intr, ldp = ir3_LDP(b, offset, 0, create_immed(b, base), 0, create_immed(b, intr->num_components), 0); - ldp->cat6.type = utype_def(&intr->dest.ssa); + ldp->cat6.type = utype_def(&intr->def); ldp->dsts[0]->wrmask = MASK(intr->num_components); ldp->barrier_class = IR3_BARRIER_PRIVATE_R; @@ -1484,7 +1484,7 @@ emit_intrinsic_image_size_tex(struct ir3_context *ctx, struct tex_src_info info = get_image_ssbo_samp_tex_src(ctx, &intr->src[0], true); struct ir3_instruction *sam, *lod; unsigned flags, ncoords = ir3_get_image_coords(intr, &flags); - type_t dst_type = intr->dest.ssa.bit_size == 16 ? TYPE_U16 : TYPE_U32; + type_t dst_type = intr->def.bit_size == 16 ? 
TYPE_U16 : TYPE_U32; info.flags |= flags; assert(nir_src_as_uint(intr->src[1]) == 0); @@ -1525,7 +1525,7 @@ emit_intrinsic_load_ssbo(struct ir3_context *ctx, { /* Note: isam currently can't handle vectorized loads/stores */ if (!(nir_intrinsic_access(intr) & ACCESS_CAN_REORDER) || - intr->dest.ssa.num_components > 1) { + intr->def.num_components > 1) { ctx->funcs->emit_intrinsic_load_ssbo(ctx, intr, dst); return; } @@ -1535,9 +1535,9 @@ emit_intrinsic_load_ssbo(struct ir3_context *ctx, struct ir3_instruction *coords = ir3_collect(b, offset, create_immed(b, 0)); struct tex_src_info info = get_image_ssbo_samp_tex_src(ctx, &intr->src[0], false); - unsigned num_components = intr->dest.ssa.num_components; + unsigned num_components = intr->def.num_components; struct ir3_instruction *sam = - emit_sam(ctx, OPC_ISAM, info, utype_for_size(intr->dest.ssa.bit_size), + emit_sam(ctx, OPC_ISAM, info, utype_for_size(intr->def.bit_size), MASK(num_components), coords, NULL); ir3_handle_nonuniform(sam, intr); @@ -1811,7 +1811,7 @@ get_frag_coord(struct ir3_context *ctx, nir_intrinsic_instr *intr) ctx->frag_coord = ir3_create_collect(b, xyzw, 4); } - ctx->so->fragcoord_compmask |= nir_def_components_read(&intr->dest.ssa); + ctx->so->fragcoord_compmask |= nir_def_components_read(&intr->def); return ctx->frag_coord; } @@ -1902,7 +1902,7 @@ emit_intrinsic_reduce(struct ir3_context *ctx, nir_intrinsic_instr *intr) struct ir3_instruction *src = ir3_get_src(ctx, &intr->src[0])[0]; nir_op nir_reduce_op = (nir_op) nir_intrinsic_reduction_op(intr); reduce_op_t reduce_op = get_reduce_op(nir_reduce_op); - unsigned dst_size = intr->dest.ssa.bit_size; + unsigned dst_size = intr->def.bit_size; unsigned flags = (ir3_bitsize(ctx, dst_size) == 16) ? IR3_REG_HALF : 0; /* Note: the shared reg is initialized to the identity, so we need it to @@ -1972,7 +1972,7 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr) int idx; if (info->has_dest) { - dst = ir3_get_def(ctx, &intr->dest.ssa, dest_components); + dst = ir3_get_def(ctx, &intr->def, dest_components); } else { dst = NULL; } @@ -2046,14 +2046,14 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr) for (int i = 0; i < dest_components; i++) { dst[i] = create_uniform_typed( b, idx + i, - intr->dest.ssa.bit_size == 16 ? TYPE_F16 : TYPE_F32); + intr->def.bit_size == 16 ? TYPE_F16 : TYPE_F32); } } else { src = ir3_get_src(ctx, &intr->src[0]); for (int i = 0; i < dest_components; i++) { dst[i] = create_uniform_indirect( b, idx + i, - intr->dest.ssa.bit_size == 16 ? TYPE_F16 : TYPE_F32, + intr->def.bit_size == 16 ? 
TYPE_F16 : TYPE_F32, ir3_get_addr0(ctx, src[0], 1)); } /* NOTE: if relative addressing is used, we set @@ -2543,7 +2543,7 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr) case nir_intrinsic_ballot: { struct ir3_instruction *ballot; - unsigned components = intr->dest.ssa.num_components; + unsigned components = intr->def.num_components; if (nir_src_is_const(intr->src[0]) && nir_src_as_bool(intr->src[0])) { /* ballot(true) is just MOVMSK */ ballot = ir3_MOVMSK(ctx->block, components); @@ -2567,7 +2567,7 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr) struct ir3_instruction *src = ir3_get_src(ctx, &intr->src[0])[0]; struct ir3_instruction *idx = ir3_get_src(ctx, &intr->src[1])[0]; - type_t dst_type = type_uint_size(intr->dest.ssa.bit_size); + type_t dst_type = type_uint_size(intr->def.bit_size); if (dst_type != TYPE_U32) idx = ir3_COV(ctx->block, idx, TYPE_U32, dst_type); @@ -2580,21 +2580,21 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr) case nir_intrinsic_quad_swap_horizontal: { struct ir3_instruction *src = ir3_get_src(ctx, &intr->src[0])[0]; dst[0] = ir3_QUAD_SHUFFLE_HORIZ(ctx->block, src, 0); - dst[0]->cat5.type = type_uint_size(intr->dest.ssa.bit_size); + dst[0]->cat5.type = type_uint_size(intr->def.bit_size); break; } case nir_intrinsic_quad_swap_vertical: { struct ir3_instruction *src = ir3_get_src(ctx, &intr->src[0])[0]; dst[0] = ir3_QUAD_SHUFFLE_VERT(ctx->block, src, 0); - dst[0]->cat5.type = type_uint_size(intr->dest.ssa.bit_size); + dst[0]->cat5.type = type_uint_size(intr->def.bit_size); break; } case nir_intrinsic_quad_swap_diagonal: { struct ir3_instruction *src = ir3_get_src(ctx, &intr->src[0])[0]; dst[0] = ir3_QUAD_SHUFFLE_DIAG(ctx->block, src, 0); - dst[0]->cat5.type = type_uint_size(intr->dest.ssa.bit_size); + dst[0]->cat5.type = type_uint_size(intr->def.bit_size); break; } @@ -2661,7 +2661,7 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr) } if (info->has_dest) - ir3_put_def(ctx, &intr->dest.ssa); + ir3_put_def(ctx, &intr->def); } static void @@ -2901,12 +2901,12 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex) type_t type; opc_t opc = 0; - ncomp = tex->dest.ssa.num_components; + ncomp = tex->def.num_components; coord = off = ddx = ddy = NULL; lod = proj = compare = sample_index = NULL; - dst = ir3_get_def(ctx, &tex->dest.ssa, ncomp); + dst = ir3_get_def(ctx, &tex->def, ncomp); for (unsigned i = 0; i < tex->num_srcs; i++) { switch (tex->src[i].src_type) { @@ -3190,7 +3190,7 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex) type_float(type) ? fui(swizzle - 4) : (swizzle - 4)); for (int i = 0; i < 4; i++) dst[i] = imm; - ir3_put_def(ctx, &tex->dest.ssa); + ir3_put_def(ctx, &tex->def); return; } opc = OPC_GATHER4R + swizzle; @@ -3284,7 +3284,7 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex) /* GETLOD returns results in 4.8 fixed point */ if (opc == OPC_GETLOD) { - bool half = tex->dest.ssa.bit_size == 16; + bool half = tex->def.bit_size == 16; struct ir3_instruction *factor = half ? 
create_immed_typed(b, _mesa_float_to_half(1.0 / 256), TYPE_F16) : create_immed(b, fui(1.0 / 256)); @@ -3296,7 +3296,7 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex) } } - ir3_put_def(ctx, &tex->dest.ssa); + ir3_put_def(ctx, &tex->def); } static void @@ -3307,7 +3307,7 @@ emit_tex_info(struct ir3_context *ctx, nir_tex_instr *tex, unsigned idx) type_t dst_type = get_tex_dest_type(tex); struct tex_src_info info = get_tex_samp_tex_src(ctx, tex); - dst = ir3_get_def(ctx, &tex->dest.ssa, 1); + dst = ir3_get_def(ctx, &tex->def, 1); sam = emit_sam(ctx, OPC_GETINFO, info, dst_type, 1 << idx, NULL, NULL); @@ -3322,7 +3322,7 @@ emit_tex_info(struct ir3_context *ctx, nir_tex_instr *tex, unsigned idx) if (ctx->compiler->levels_add_one) dst[0] = ir3_ADD_U(b, dst[0], 0, create_immed(b, 1), 0); - ir3_put_def(ctx, &tex->dest.ssa); + ir3_put_def(ctx, &tex->def); } static void @@ -3344,7 +3344,7 @@ emit_tex_txs(struct ir3_context *ctx, nir_tex_instr *tex) if (tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE) coords = 2; - dst = ir3_get_def(ctx, &tex->dest.ssa, 4); + dst = ir3_get_def(ctx, &tex->def, 4); int lod_idx = nir_tex_instr_src_index(tex, nir_tex_src_lod); compile_assert(ctx, lod_idx >= 0); @@ -3377,7 +3377,7 @@ emit_tex_txs(struct ir3_context *ctx, nir_tex_instr *tex) } } - ir3_put_def(ctx, &tex->dest.ssa); + ir3_put_def(ctx, &tex->def); } /* phi instructions are left partially constructed. We don't resolve @@ -3391,9 +3391,9 @@ emit_phi(struct ir3_context *ctx, nir_phi_instr *nphi) struct ir3_instruction *phi, **dst; /* NOTE: phi's should be lowered to scalar at this point */ - compile_assert(ctx, nphi->dest.ssa.num_components == 1); + compile_assert(ctx, nphi->def.num_components == 1); - dst = ir3_get_def(ctx, &nphi->dest.ssa, 1); + dst = ir3_get_def(ctx, &nphi->def, 1); phi = ir3_instr_create(ctx->block, OPC_META_PHI, 1, exec_list_length(&nphi->srcs)); @@ -3402,7 +3402,7 @@ emit_phi(struct ir3_context *ctx, nir_phi_instr *nphi) dst[0] = phi; - ir3_put_def(ctx, &nphi->dest.ssa); + ir3_put_def(ctx, &nphi->def); } static struct ir3_block *get_block(struct ir3_context *ctx, diff --git a/src/freedreno/ir3/ir3_context.c b/src/freedreno/ir3/ir3_context.c index 2775473..4cc3038 100644 --- a/src/freedreno/ir3/ir3_context.c +++ b/src/freedreno/ir3/ir3_context.c @@ -509,7 +509,7 @@ ir3_declare_array(struct ir3_context *ctx, nir_intrinsic_instr *decl) MAX2(1, nir_intrinsic_num_array_elems(decl)); compile_assert(ctx, arr->length > 0); - arr->r = &decl->dest.ssa; + arr->r = &decl->def; arr->half = ir3_bitsize(ctx, nir_intrinsic_bit_size(decl)) <= 16; list_addtail(&arr->node, &ctx->ir->array_list); } diff --git a/src/freedreno/ir3/ir3_image.c b/src/freedreno/ir3/ir3_image.c index b868329..7d420c8 100644 --- a/src/freedreno/ir3/ir3_image.c +++ b/src/freedreno/ir3/ir3_image.c @@ -116,7 +116,7 @@ type_t ir3_get_type_for_image_intrinsic(const nir_intrinsic_instr *instr) { const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic]; - int bit_size = info->has_dest ? instr->dest.ssa.bit_size : nir_src_bit_size(instr->src[3]); + int bit_size = info->has_dest ? 
instr->def.bit_size : nir_src_bit_size(instr->src[3]); nir_alu_type type = nir_type_uint; switch (instr->intrinsic) { diff --git a/src/freedreno/ir3/ir3_nir.c b/src/freedreno/ir3/ir3_nir.c index 90d2708..a70a48c 100644 --- a/src/freedreno/ir3/ir3_nir.c +++ b/src/freedreno/ir3/ir3_nir.c @@ -222,7 +222,7 @@ ir3_nir_lower_ssbo_size_instr(nir_builder *b, nir_instr *instr, void *data) { uint8_t ssbo_size_to_bytes_shift = *(uint8_t *) data; nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr); - return nir_ishl_imm(b, &intr->dest.ssa, ssbo_size_to_bytes_shift); + return nir_ishl_imm(b, &intr->def, ssbo_size_to_bytes_shift); } static bool diff --git a/src/freedreno/ir3/ir3_nir_analyze_ubo_ranges.c b/src/freedreno/ir3/ir3_nir_analyze_ubo_ranges.c index 9408f5c..e57763a 100644 --- a/src/freedreno/ir3/ir3_nir_analyze_ubo_ranges.c +++ b/src/freedreno/ir3/ir3_nir_analyze_ubo_ranges.c @@ -326,10 +326,10 @@ lower_ubo_load_to_uniform(nir_intrinsic_instr *instr, nir_builder *b, } nir_def *uniform = - nir_load_uniform(b, instr->num_components, instr->dest.ssa.bit_size, + nir_load_uniform(b, instr->num_components, instr->def.bit_size, uniform_offset, .base = const_offset); - nir_def_rewrite_uses(&instr->dest.ssa, uniform); + nir_def_rewrite_uses(&instr->def, uniform); nir_instr_remove(&instr->instr); @@ -594,7 +594,7 @@ ir3_nir_lower_load_const_instr(nir_builder *b, nir_instr *in_instr, void *data) } unsigned num_components = instr->num_components; - if (instr->dest.ssa.bit_size == 16) { + if (instr->def.bit_size == 16) { /* We can't do 16b loads -- either from LDC (32-bit only in any of our * traces, and disasm that doesn't look like it really supports it) or * from the constant file (where CONSTANT_DEMOTION_ENABLE means we get @@ -614,7 +614,7 @@ ir3_nir_lower_load_const_instr(nir_builder *b, nir_instr *in_instr, void *data) .align_offset = nir_intrinsic_align_offset(instr), .range_base = base, .range = nir_intrinsic_range(instr)); - if (instr->dest.ssa.bit_size == 16) { + if (instr->def.bit_size == 16) { result = nir_bitcast_vector(b, result, 16); result = nir_trim_vector(b, result, instr->num_components); } diff --git a/src/freedreno/ir3/ir3_nir_lower_64b.c b/src/freedreno/ir3/ir3_nir_lower_64b.c index 6037fd1..9e4dfae 100644 --- a/src/freedreno/ir3/ir3_nir_lower_64b.c +++ b/src/freedreno/ir3/ir3_nir_lower_64b.c @@ -50,7 +50,7 @@ lower_64b_intrinsics_filter(const nir_instr *instr, const void *unused) if (nir_intrinsic_dest_components(intr) == 0) return false; - return intr->dest.ssa.bit_size == 64; + return intr->def.bit_size == 64; } static nir_def * @@ -106,7 +106,7 @@ lower_64b_intrinsics(nir_builder *b, nir_instr *instr, void *unused) unsigned num_comp = nir_intrinsic_dest_components(intr); - nir_def *def = &intr->dest.ssa; + nir_def *def = &intr->def; def->bit_size = 32; /* load_kernel_input is handled specially, lowering to two 32b inputs: @@ -144,10 +144,10 @@ lower_64b_intrinsics(nir_builder *b, nir_instr *instr, void *unused) load->num_components = 2; load->src[offset_src_idx] = nir_src_for_ssa(off); - nir_def_init(&load->instr, &load->dest.ssa, 2, 32); + nir_def_init(&load->instr, &load->def, 2, 32); nir_builder_instr_insert(b, &load->instr); - components[i] = nir_pack_64_2x32(b, &load->dest.ssa); + components[i] = nir_pack_64_2x32(b, &load->def); off = nir_iadd_imm(b, off, 8); } @@ -257,12 +257,12 @@ lower_64b_global(nir_builder *b, nir_instr *instr, void *unused) if (intr->intrinsic == nir_intrinsic_global_atomic) { return nir_global_atomic_ir3( - b, intr->dest.ssa.bit_size, addr, + b, 
intr->def.bit_size, addr, nir_ssa_for_src(b, intr->src[1], 1), .atomic_op = nir_intrinsic_atomic_op(intr)); } else if (intr->intrinsic == nir_intrinsic_global_atomic_swap) { return nir_global_atomic_swap_ir3( - b, intr->dest.ssa.bit_size, addr, + b, intr->def.bit_size, addr, nir_ssa_for_src(b, intr->src[1], 1), nir_ssa_for_src(b, intr->src[2], 1), .atomic_op = nir_intrinsic_atomic_op(intr)); @@ -274,7 +274,7 @@ lower_64b_global(nir_builder *b, nir_instr *instr, void *unused) for (unsigned off = 0; off < num_comp;) { unsigned c = MIN2(num_comp - off, 4); nir_def *val = nir_load_global_ir3( - b, c, intr->dest.ssa.bit_size, + b, c, intr->def.bit_size, addr, nir_imm_int(b, off)); for (unsigned i = 0; i < c; i++) { components[off++] = nir_channel(b, val, i); diff --git a/src/freedreno/ir3/ir3_nir_lower_io_offsets.c b/src/freedreno/ir3/ir3_nir_lower_io_offsets.c index 867c81b..019c1bb 100644 --- a/src/freedreno/ir3/ir3_nir_lower_io_offsets.c +++ b/src/freedreno/ir3/ir3_nir_lower_io_offsets.c @@ -156,10 +156,10 @@ scalarize_load(nir_intrinsic_instr *intrinsic, nir_builder *b) nir_def *descriptor = intrinsic->src[0].ssa; nir_def *offset = intrinsic->src[1].ssa; nir_def *new_offset = intrinsic->src[2].ssa; - unsigned comp_size = intrinsic->dest.ssa.bit_size / 8; - for (unsigned i = 0; i < intrinsic->dest.ssa.num_components; i++) { + unsigned comp_size = intrinsic->def.bit_size / 8; + for (unsigned i = 0; i < intrinsic->def.num_components; i++) { results[i] = - nir_load_ssbo_ir3(b, 1, intrinsic->dest.ssa.bit_size, descriptor, + nir_load_ssbo_ir3(b, 1, intrinsic->def.bit_size, descriptor, nir_iadd_imm(b, offset, i * comp_size), nir_iadd_imm(b, new_offset, i), .access = nir_intrinsic_access(intrinsic), @@ -167,9 +167,9 @@ scalarize_load(nir_intrinsic_instr *intrinsic, nir_builder *b) .align_offset = nir_intrinsic_align_offset(intrinsic)); } - nir_def *result = nir_vec(b, results, intrinsic->dest.ssa.num_components); + nir_def *result = nir_vec(b, results, intrinsic->def.num_components); - nir_def_rewrite_uses(&intrinsic->dest.ssa, result); + nir_def_rewrite_uses(&intrinsic->def, result); nir_instr_remove(&intrinsic->instr); } @@ -185,7 +185,7 @@ lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b, nir_def *new_dest = NULL; /* for 16-bit ssbo access, offset is in 16-bit words instead of dwords */ - if ((has_dest && intrinsic->dest.ssa.bit_size == 16) || + if ((has_dest && intrinsic->def.bit_size == 16) || (!has_dest && intrinsic->src[0].ssa->bit_size == 16)) shift = 1; @@ -219,10 +219,10 @@ lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b, *target_src = nir_src_for_ssa(offset); if (has_dest) { - nir_def *dest = &intrinsic->dest.ssa; - nir_def_init(&new_intrinsic->instr, &new_intrinsic->dest.ssa, + nir_def *dest = &intrinsic->def; + nir_def_init(&new_intrinsic->instr, &new_intrinsic->def, dest->num_components, dest->bit_size); - new_dest = &new_intrinsic->dest.ssa; + new_dest = &new_intrinsic->def; } for (unsigned i = 0; i < num_srcs; i++) @@ -253,7 +253,7 @@ lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b, /* Replace the uses of the original destination by that * of the new intrinsic. */ - nir_def_rewrite_uses(&intrinsic->dest.ssa, new_dest); + nir_def_rewrite_uses(&intrinsic->def, new_dest); } /* Finally remove the original intrinsic. 
*/ diff --git a/src/freedreno/ir3/ir3_nir_lower_layer_id.c b/src/freedreno/ir3/ir3_nir_lower_layer_id.c index 84be636..b36aeef 100644 --- a/src/freedreno/ir3/ir3_nir_lower_layer_id.c +++ b/src/freedreno/ir3/ir3_nir_lower_layer_id.c @@ -36,9 +36,9 @@ nir_lower_layer_id(nir_builder *b, nir_instr *instr, UNUSED void *cb_data) .num_slots = 1, }; nir_intrinsic_set_io_semantics(load_input, semantics); - nir_def_init(&load_input->instr, &load_input->dest.ssa, 1, 32); + nir_def_init(&load_input->instr, &load_input->def, 1, 32); nir_builder_instr_insert(b, &load_input->instr); - nir_def_rewrite_uses(&intr->dest.ssa, &load_input->dest.ssa); + nir_def_rewrite_uses(&intr->def, &load_input->def); return true; } diff --git a/src/freedreno/ir3/ir3_nir_lower_tess.c b/src/freedreno/ir3/ir3_nir_lower_tess.c index 24ff60c..0f3c13c 100644 --- a/src/freedreno/ir3/ir3_nir_lower_tess.c +++ b/src/freedreno/ir3/ir3_nir_lower_tess.c @@ -175,13 +175,13 @@ replace_intrinsic(nir_builder *b, nir_intrinsic_instr *intr, new_intr->num_components = intr->num_components; if (nir_intrinsic_infos[op].has_dest) - nir_def_init(&new_intr->instr, &new_intr->dest.ssa, - intr->num_components, intr->dest.ssa.bit_size); + nir_def_init(&new_intr->instr, &new_intr->def, + intr->num_components, intr->def.bit_size); nir_builder_instr_insert(b, &new_intr->instr); if (nir_intrinsic_infos[op].has_dest) - nir_def_rewrite_uses(&intr->dest.ssa, &new_intr->dest.ssa); + nir_def_rewrite_uses(&intr->def, &new_intr->def); nir_instr_remove(&intr->instr); @@ -348,7 +348,7 @@ lower_block_to_explicit_input(nir_block *block, nir_builder *b, b->cursor = nir_before_instr(&intr->instr); nir_def *iid = build_invocation_id(b, state); - nir_def_rewrite_uses(&intr->dest.ssa, iid); + nir_def_rewrite_uses(&intr->def, iid); nir_instr_remove(&intr->instr); break; } @@ -568,7 +568,7 @@ lower_tess_ctrl_block(nir_block *block, nir_builder *b, struct state *state) */ gl_varying_slot location = nir_intrinsic_io_semantics(intr).location; if (is_tess_levels(location)) { - assert(intr->dest.ssa.num_components == 1); + assert(intr->def.num_components == 1); address = nir_load_tess_factor_base_ir3(b); offset = build_tessfactor_base( b, location, nir_intrinsic_component(intr), state); @@ -775,7 +775,7 @@ lower_tess_eval_block(nir_block *block, nir_builder *b, struct state *state) */ gl_varying_slot location = nir_intrinsic_io_semantics(intr).location; if (is_tess_levels(location)) { - assert(intr->dest.ssa.num_components == 1); + assert(intr->def.num_components == 1); address = nir_load_tess_factor_base_ir3(b); offset = build_tessfactor_base( b, location, nir_intrinsic_component(intr), state); diff --git a/src/freedreno/ir3/ir3_nir_lower_wide_load_store.c b/src/freedreno/ir3/ir3_nir_lower_wide_load_store.c index 67655c0..a39ab1d 100644 --- a/src/freedreno/ir3/ir3_nir_lower_wide_load_store.c +++ b/src/freedreno/ir3/ir3_nir_lower_wide_load_store.c @@ -81,7 +81,7 @@ lower_wide_load_store(nir_builder *b, nir_instr *instr, void *unused) return NIR_LOWER_INSTR_PROGRESS_REPLACE; } else { unsigned num_comp = nir_intrinsic_dest_components(intr); - unsigned bit_size = intr->dest.ssa.bit_size; + unsigned bit_size = intr->def.bit_size; nir_def *addr = nir_ssa_for_src(b, intr->src[0], 1); nir_def *components[num_comp]; @@ -93,7 +93,7 @@ lower_wide_load_store(nir_builder *b, nir_instr *instr, void *unused) load->num_components = c; load->src[0] = nir_src_for_ssa(addr); nir_intrinsic_set_align(load, nir_intrinsic_align(intr), 0); - nir_def_init(&load->instr, &load->dest.ssa, c, bit_size); 
+ nir_def_init(&load->instr, &load->def, c, bit_size); nir_builder_instr_insert(b, &load->instr); addr = nir_iadd(b, @@ -101,7 +101,7 @@ lower_wide_load_store(nir_builder *b, nir_instr *instr, void *unused) addr); for (unsigned i = 0; i < c; i++) { - components[off++] = nir_channel(b, &load->dest.ssa, i); + components[off++] = nir_channel(b, &load->def, i); } } diff --git a/src/freedreno/ir3/ir3_nir_opt_preamble.c b/src/freedreno/ir3/ir3_nir_opt_preamble.c index ff9a5a0..d774f5f 100644 --- a/src/freedreno/ir3/ir3_nir_opt_preamble.c +++ b/src/freedreno/ir3/ir3_nir_opt_preamble.c @@ -322,7 +322,7 @@ ir3_nir_lower_preamble(nir_shader *nir, struct ir3_shader_variant *v) if (intrin->intrinsic != nir_intrinsic_load_preamble) continue; - nir_def *dest = &intrin->dest.ssa; + nir_def *dest = &intrin->def; unsigned offset = preamble_base + nir_intrinsic_base(intrin); b->cursor = nir_before_instr(instr); diff --git a/src/freedreno/vulkan/tu_clear_blit.cc b/src/freedreno/vulkan/tu_clear_blit.cc index 15a3ff9..3fa97de 100644 --- a/src/freedreno/vulkan/tu_clear_blit.cc +++ b/src/freedreno/vulkan/tu_clear_blit.cc @@ -623,10 +623,10 @@ build_blit_fs_shader(bool zscale) nir_load_var(b, in_coords)); tex->coord_components = coord_components; - nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32); + nir_def_init(&tex->instr, &tex->def, 4, 32); nir_builder_instr_insert(b, &tex->instr); - nir_store_var(b, out_color, &tex->dest.ssa, 0xf); + nir_store_var(b, out_color, &tex->def, 0xf); return b->shader; } @@ -681,10 +681,10 @@ build_ms_copy_fs_shader(void) tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_ms_index, nir_load_sample_id(b)); - nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32); + nir_def_init(&tex->instr, &tex->def, 4, 32); nir_builder_instr_insert(b, &tex->instr); - nir_store_var(b, out_color, &tex->dest.ssa, 0xf); + nir_store_var(b, out_color, &tex->def, 0xf); return b->shader; } diff --git a/src/freedreno/vulkan/tu_shader.cc b/src/freedreno/vulkan/tu_shader.cc index 22c44c8..e4f8c47 100644 --- a/src/freedreno/vulkan/tu_shader.cc +++ b/src/freedreno/vulkan/tu_shader.cc @@ -155,11 +155,11 @@ lower_load_push_constant(struct tu_device *dev, nir_def *load = nir_load_uniform(b, instr->num_components, - instr->dest.ssa.bit_size, + instr->def.bit_size, nir_ushr_imm(b, instr->src[0].ssa, 2), .base = base); - nir_def_rewrite_uses(&instr->dest.ssa, load); + nir_def_rewrite_uses(&instr->def, load); nir_instr_remove(&instr->instr); } @@ -217,7 +217,7 @@ lower_vulkan_resource_index(nir_builder *b, nir_intrinsic_instr *instr, nir_ishl(b, vulkan_idx, shift)), shift); - nir_def_rewrite_uses(&instr->dest.ssa, def); + nir_def_rewrite_uses(&instr->def, def); nir_instr_remove(&instr->instr); } @@ -234,7 +234,7 @@ lower_vulkan_resource_reindex(nir_builder *b, nir_intrinsic_instr *instr) nir_ishl(b, delta, shift)), shift); - nir_def_rewrite_uses(&instr->dest.ssa, new_index); + nir_def_rewrite_uses(&instr->def, new_index); nir_instr_remove(&instr->instr); } @@ -249,7 +249,7 @@ lower_load_vulkan_descriptor(nir_builder *b, nir_intrinsic_instr *intrin) nir_vec3(b, nir_channel(b, old_index, 0), nir_channel(b, old_index, 1), nir_imm_int(b, 0)); - nir_def_rewrite_uses(&intrin->dest.ssa, new_index); + nir_def_rewrite_uses(&intrin->def, new_index); nir_instr_remove(&intrin->instr); } @@ -282,7 +282,7 @@ lower_ssbo_ubo_intrinsic(struct tu_device *dev, if (dev->physical_device->info->a6xx.storage_16bit && intrin->intrinsic == nir_intrinsic_load_ssbo && (nir_intrinsic_access(intrin) & ACCESS_CAN_REORDER) && - intrin->dest.ssa.bit_size > 16) 
{ + intrin->def.bit_size > 16) { descriptor_idx = nir_iadd_imm(b, descriptor_idx, 1); } @@ -320,10 +320,10 @@ lower_ssbo_ubo_intrinsic(struct tu_device *dev, } if (info->has_dest) { - nir_def_init(&copy->instr, &copy->dest.ssa, - intrin->dest.ssa.num_components, - intrin->dest.ssa.bit_size); - results[i] = &copy->dest.ssa; + nir_def_init(&copy->instr, &copy->def, + intrin->def.num_components, + intrin->def.bit_size); + results[i] = &copy->def; } nir_builder_instr_insert(b, &copy->instr); @@ -333,7 +333,7 @@ lower_ssbo_ubo_intrinsic(struct tu_device *dev, } nir_def *result = - nir_undef(b, intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size); + nir_undef(b, intrin->def.num_components, intrin->def.bit_size); for (int i = MAX_SETS; i >= 0; i--) { nir_pop_if(b, NULL); if (info->has_dest) @@ -341,7 +341,7 @@ lower_ssbo_ubo_intrinsic(struct tu_device *dev, } if (info->has_dest) - nir_def_rewrite_uses(&intrin->dest.ssa, result); + nir_def_rewrite_uses(&intrin->def, result); nir_instr_remove(&intrin->instr); } @@ -522,9 +522,9 @@ lower_tex_ycbcr(const struct tu_pipeline_layout *layout, nir_def *result = nir_convert_ycbcr_to_rgb(builder, ycbcr_sampler->ycbcr_model, ycbcr_sampler->ycbcr_range, - &tex->dest.ssa, + &tex->def, bpcs); - nir_def_rewrite_uses_after(&tex->dest.ssa, result, + nir_def_rewrite_uses_after(&tex->def, result, result->parent_instr); builder->cursor = nir_before_instr(&tex->instr); @@ -629,9 +629,9 @@ lower_inline_ubo(nir_builder *b, nir_instr *instr, void *cb_data) /* Assume we're loading out-of-bounds from a 0-sized inline uniform * filtered out below. */ - nir_def_rewrite_uses(&intrin->dest.ssa, + nir_def_rewrite_uses(&intrin->def, nir_undef(b, intrin->num_components, - intrin->dest.ssa.bit_size)); + intrin->def.bit_size)); return true; } @@ -644,15 +644,15 @@ lower_inline_ubo(nir_builder *b, nir_instr *instr, void *cb_data) nir_def *base_addr = nir_load_uniform(b, 2, 32, nir_imm_int(b, 0), .base = base); val = nir_load_global_ir3(b, intrin->num_components, - intrin->dest.ssa.bit_size, + intrin->def.bit_size, base_addr, nir_ishr_imm(b, offset, 2)); } else { val = nir_load_uniform(b, intrin->num_components, - intrin->dest.ssa.bit_size, + intrin->def.bit_size, nir_ishr_imm(b, offset, 2), .base = base); } - nir_def_rewrite_uses(&intrin->dest.ssa, val); + nir_def_rewrite_uses(&intrin->def, val); nir_instr_remove(instr); return true; } diff --git a/src/gallium/auxiliary/gallivm/lp_bld_nir.c b/src/gallium/auxiliary/gallivm/lp_bld_nir.c index a088d53..0c46805 100644 --- a/src/gallium/auxiliary/gallivm/lp_bld_nir.c +++ b/src/gallium/auxiliary/gallivm/lp_bld_nir.c @@ -1296,8 +1296,8 @@ visit_load_input(struct lp_build_nir_context *bld_base, var.data.driver_location = nir_intrinsic_base(instr); var.data.location_frac = nir_intrinsic_component(instr); - unsigned nc = instr->dest.ssa.num_components; - unsigned bit_size = instr->dest.ssa.bit_size; + unsigned nc = instr->def.num_components; + unsigned bit_size = instr->def.bit_size; nir_src offset = *nir_get_io_offset_src(instr); bool indirect = !nir_src_is_const(offset); @@ -1364,8 +1364,8 @@ visit_load_reg(struct lp_build_nir_context *bld_base, LLVMValueRef val = bld_base->load_reg(bld_base, reg_bld, decl, base, indir_src, reg_storage); - if (!is_aos(bld_base) && instr->dest.ssa.num_components > 1) { - for (unsigned i = 0; i < instr->dest.ssa.num_components; i++) + if (!is_aos(bld_base) && instr->def.num_components > 1) { + for (unsigned i = 0; i < instr->def.num_components; i++) result[i] = LLVMBuildExtractValue(builder, val, i, ""); } else { result[0] = val;
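
Note: the hunks in this file follow the same post-rename idiom as the rest of the patch: an instruction's SSA value is reached through its def field, and num_components/bit_size are read directly off that nir_def. A minimal sketch of a lowering callback written against the renamed API; the nir_intrinsic_load_foo opcode is hypothetical and purely for illustration, while the helpers are the nir_builder API used throughout this patch:

static bool
lower_foo(nir_builder *b, nir_intrinsic_instr *intr, void *data)
{
   if (intr->intrinsic != nir_intrinsic_load_foo) /* hypothetical opcode */
      return false;

   b->cursor = nir_before_instr(&intr->instr);

   /* Sizing information now lives directly on the def, not on dest.ssa. */
   nir_def *zero = nir_imm_zero(b, intr->def.num_components,
                                intr->def.bit_size);

   /* Rewrite all users to the replacement value and drop the original. */
   nir_def_rewrite_uses(&intr->def, zero);
   nir_instr_remove(&intr->instr);
   return true;
}
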
@@ -1435,8 +1435,8 @@ visit_load_var(struct lp_build_nir_context *bld_base, LLVMValueRef indir_index; LLVMValueRef indir_vertex_index = NULL; unsigned vertex_index = 0; - unsigned nc = instr->dest.ssa.num_components; - unsigned bit_size = instr->dest.ssa.bit_size; + unsigned nc = instr->def.num_components; + unsigned bit_size = instr->def.bit_size; if (var) { bool vs_in = bld_base->shader->info.stage == MESA_SHADER_VERTEX && var->data.mode == nir_var_shader_in; @@ -1461,8 +1461,8 @@ visit_load_var(struct lp_build_nir_context *bld_base, */ if (var->data.compact && compact_array_index_oob(bld_base, var, const_index)) { struct lp_build_context *undef_bld = get_int_bld(bld_base, true, - instr->dest.ssa.bit_size); - for (int i = 0; i < instr->dest.ssa.num_components; i++) + instr->def.bit_size); + for (int i = 0; i < instr->def.num_components; i++) result[i] = LLVMGetUndef(undef_bld->vec_type); return; } @@ -1521,8 +1521,8 @@ visit_load_ubo(struct lp_build_nir_context *bld_base, if (nir_src_num_components(instr->src[0]) == 1) idx = LLVMBuildExtractElement(builder, idx, lp_build_const_int32(gallivm, 0), ""); - bld_base->load_ubo(bld_base, instr->dest.ssa.num_components, - instr->dest.ssa.bit_size, + bld_base->load_ubo(bld_base, instr->def.num_components, + instr->def.bit_size, offset_is_uniform, idx, offset, result); } @@ -1537,8 +1537,8 @@ visit_load_push_constant(struct lp_build_nir_context *bld_base, LLVMValueRef idx = lp_build_const_int32(gallivm, 0); bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]); - bld_base->load_ubo(bld_base, instr->dest.ssa.num_components, - instr->dest.ssa.bit_size, + bld_base->load_ubo(bld_base, instr->def.num_components, + instr->def.bit_size, offset_is_uniform, idx, offset, result); } @@ -1556,8 +1556,8 @@ visit_load_ssbo(struct lp_build_nir_context *bld_base, bool index_and_offset_are_uniform = nir_src_is_always_uniform(instr->src[0]) && nir_src_is_always_uniform(instr->src[1]); - bld_base->load_mem(bld_base, instr->dest.ssa.num_components, - instr->dest.ssa.bit_size, + bld_base->load_mem(bld_base, instr->def.num_components, + instr->def.bit_size, index_and_offset_are_uniform, false, idx, offset, result); } @@ -1881,8 +1881,8 @@ visit_shared_load(struct lp_build_nir_context *bld_base, { LLVMValueRef offset = get_src(bld_base, instr->src[0]); bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]); - bld_base->load_mem(bld_base, instr->dest.ssa.num_components, - instr->dest.ssa.bit_size, + bld_base->load_mem(bld_base, instr->def.num_components, + instr->def.bit_size, offset_is_uniform, false, NULL, offset, result); } @@ -1957,8 +1957,8 @@ visit_load_kernel_input(struct lp_build_nir_context *bld_base, LLVMValueRef offset = get_src(bld_base, instr->src[0]); bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]); - bld_base->load_kernel_arg(bld_base, instr->dest.ssa.num_components, - instr->dest.ssa.bit_size, + bld_base->load_kernel_arg(bld_base, instr->def.num_components, + instr->def.bit_size, nir_src_bit_size(instr->src[0]), offset_is_uniform, offset, result); } @@ -1971,8 +1971,8 @@ visit_load_global(struct lp_build_nir_context *bld_base, { LLVMValueRef addr = get_src(bld_base, instr->src[0]); bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]); - bld_base->load_global(bld_base, instr->dest.ssa.num_components, - instr->dest.ssa.bit_size, + bld_base->load_global(bld_base, instr->def.num_components, + instr->def.bit_size, nir_src_bit_size(instr->src[0]), offset_is_uniform, addr, result); } @@ -2036,7 +2036,7 @@ 
visit_interp(struct lp_build_nir_context *bld_base, struct gallivm_state *gallivm = bld_base->base.gallivm; LLVMBuilderRef builder = gallivm->builder; nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr); - unsigned num_components = instr->dest.ssa.num_components; + unsigned num_components = instr->def.num_components; nir_variable *var = nir_deref_instr_get_variable(deref); unsigned const_index; LLVMValueRef indir_index; @@ -2067,8 +2067,8 @@ visit_load_scratch(struct lp_build_nir_context *bld_base, { LLVMValueRef offset = get_src(bld_base, instr->src[0]); - bld_base->load_scratch(bld_base, instr->dest.ssa.num_components, - instr->dest.ssa.bit_size, offset, result); + bld_base->load_scratch(bld_base, instr->def.num_components, + instr->def.bit_size, offset, result); } @@ -2091,8 +2091,8 @@ visit_payload_load(struct lp_build_nir_context *bld_base, { LLVMValueRef offset = get_src(bld_base, instr->src[0]); bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]); - bld_base->load_mem(bld_base, instr->dest.ssa.num_components, - instr->dest.ssa.bit_size, + bld_base->load_mem(bld_base, instr->def.num_components, + instr->def.bit_size, offset_is_uniform, true, NULL, offset, result); } @@ -2339,7 +2339,7 @@ visit_intrinsic(struct lp_build_nir_context *bld_base, break; } if (result[0]) { - assign_ssa_dest(bld_base, &instr->dest.ssa, result); + assign_ssa_dest(bld_base, &instr->def, result); } } @@ -2385,7 +2385,7 @@ visit_txs(struct lp_build_nir_context *bld_base, nir_tex_instr *instr) params.resource = resource; bld_base->tex_size(bld_base, &params); - assign_ssa_dest(bld_base, &instr->dest.ssa, + assign_ssa_dest(bld_base, &instr->def, &sizes_out[instr->op == nir_texop_query_levels ? 3 : 0]); } @@ -2668,8 +2668,8 @@ visit_tex(struct lp_build_nir_context *bld_base, nir_tex_instr *instr) params.sampler_resource = sampler_resource; bld_base->tex(bld_base, &params); - if (instr->dest.ssa.bit_size != 32) { - assert(instr->dest.ssa.bit_size == 16); + if (instr->def.bit_size != 32) { + assert(instr->def.bit_size == 16); LLVMTypeRef vec_type = NULL; bool is_float = false; switch (nir_alu_type_get_base_type(instr->dest_type)) { @@ -2685,7 +2685,7 @@ visit_tex(struct lp_build_nir_context *bld_base, nir_tex_instr *instr) default: unreachable("unexpected alu type"); } - for (int i = 0; i < instr->dest.ssa.num_components; ++i) { + for (int i = 0; i < instr->def.num_components; ++i) { if (is_float) { texel[i] = lp_build_float_to_half(gallivm, texel[i]); } else { @@ -2695,7 +2695,7 @@ visit_tex(struct lp_build_nir_context *bld_base, nir_tex_instr *instr) } } - assign_ssa_dest(bld_base, &instr->dest.ssa, texel); + assign_ssa_dest(bld_base, &instr->def, texel); } @@ -2752,7 +2752,7 @@ visit_deref(struct lp_build_nir_context *bld_base, unreachable("Unhandled deref_instr deref type"); } - assign_ssa(bld_base, instr->dest.ssa.index, result); + assign_ssa(bld_base, instr->def.index, result); } diff --git a/src/gallium/auxiliary/gallivm/lp_bld_nir_soa.c b/src/gallium/auxiliary/gallivm/lp_bld_nir_soa.c index 9ad504c..3efcd22 100644 --- a/src/gallium/auxiliary/gallivm/lp_bld_nir_soa.c +++ b/src/gallium/auxiliary/gallivm/lp_bld_nir_soa.c @@ -1893,7 +1893,7 @@ static void emit_sysval_intrin(struct lp_build_nir_context *bld_base, { struct lp_build_nir_soa_context *bld = (struct lp_build_nir_soa_context *)bld_base; struct gallivm_state *gallivm = bld_base->base.gallivm; - struct lp_build_context *bld_broad = get_int_bld(bld_base, true, instr->dest.ssa.bit_size); + struct lp_build_context *bld_broad =
get_int_bld(bld_base, true, instr->def.bit_size); switch (instr->intrinsic) { case nir_intrinsic_load_instance_id: result[0] = lp_build_broadcast_scalar(&bld_base->uint_bld, bld->system_values.instance_id); @@ -1917,7 +1917,7 @@ static void emit_sysval_intrin(struct lp_build_nir_context *bld_base, LLVMValueRef tmp[3]; for (unsigned i = 0; i < 3; i++) { tmp[i] = bld->system_values.block_id[i]; - if (instr->dest.ssa.bit_size == 64) + if (instr->def.bit_size == 64) tmp[i] = LLVMBuildZExt(gallivm->builder, tmp[i], bld_base->uint64_bld.elem_type, ""); result[i] = lp_build_broadcast_scalar(bld_broad, tmp[i]); } @@ -1934,7 +1934,7 @@ static void emit_sysval_intrin(struct lp_build_nir_context *bld_base, LLVMValueRef tmp[3]; for (unsigned i = 0; i < 3; i++) { tmp[i] = bld->system_values.grid_size[i]; - if (instr->dest.ssa.bit_size == 64) + if (instr->def.bit_size == 64) tmp[i] = LLVMBuildZExt(gallivm->builder, tmp[i], bld_base->uint64_bld.elem_type, ""); result[i] = lp_build_broadcast_scalar(bld_broad, tmp[i]); } diff --git a/src/gallium/auxiliary/nir/nir_draw_helpers.c b/src/gallium/auxiliary/nir/nir_draw_helpers.c index 32fcba5..5bb2abf 100644 --- a/src/gallium/auxiliary/nir/nir_draw_helpers.c +++ b/src/gallium/auxiliary/nir/nir_draw_helpers.c @@ -78,7 +78,7 @@ nir_lower_pstipple_block(nir_block *block, tex->texture_index = state->stip_tex->data.binding; tex->sampler_index = state->stip_tex->data.binding; tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_coord, texcoord); - nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32); + nir_def_init(&tex->instr, &tex->def, 4, 32); nir_builder_instr_insert(b, &tex->instr); @@ -86,11 +86,11 @@ nir_lower_pstipple_block(nir_block *block, switch (state->bool_type) { case nir_type_bool1: - condition = nir_fneu_imm(b, nir_channel(b, &tex->dest.ssa, 3), 0.0); + condition = nir_fneu_imm(b, nir_channel(b, &tex->def, 3), 0.0); break; case nir_type_bool32: - condition = nir_fneu32(b, nir_channel(b, &tex->dest.ssa, 3), - nir_imm_floatN_t(b, 0.0, tex->dest.ssa.bit_size)); + condition = nir_fneu32(b, nir_channel(b, &tex->def, 3), + nir_imm_floatN_t(b, 0.0, tex->def.bit_size)); break; default: unreachable("Invalid Boolean type."); diff --git a/src/gallium/auxiliary/nir/nir_to_tgsi.c b/src/gallium/auxiliary/nir/nir_to_tgsi.c index f042968..46a02fa 100644 --- a/src/gallium/auxiliary/nir/nir_to_tgsi.c +++ b/src/gallium/auxiliary/nir/nir_to_tgsi.c @@ -804,7 +804,7 @@ ntt_try_store_reg_in_tgsi_output(struct ntt_compile *c, struct ureg_dst *dst, nir_src *use = NULL; nir_foreach_reg_load(src, reg_decl) { nir_intrinsic_instr *load = nir_instr_as_intrinsic(src->parent_instr); - nir_foreach_use_including_if(load_use, &load->dest.ssa) { + nir_foreach_use_including_if(load_use, &load->def) { /* We can only have one use */ if (use != NULL) return false; @@ -1123,7 +1123,7 @@ ntt_setup_registers(struct ntt_compile *c) nir_foreach_reg_decl_safe(nir_reg, nir_shader_get_entrypoint(c->s)) { /* Permanently allocate all the array regs at the start. 
*/ unsigned num_array_elems = nir_intrinsic_num_array_elems(nir_reg); - unsigned index = nir_reg->dest.ssa.index; + unsigned index = nir_reg->def.index; if (num_array_elems != 0) { struct ureg_dst decl = ureg_DECL_array_temporary(c->ureg, num_array_elems, true); @@ -1141,7 +1141,7 @@ ntt_setup_registers(struct ntt_compile *c) unsigned num_array_elems = nir_intrinsic_num_array_elems(nir_reg); unsigned num_components = nir_intrinsic_num_components(nir_reg); unsigned bit_size = nir_intrinsic_bit_size(nir_reg); - unsigned index = nir_reg->dest.ssa.index; + unsigned index = nir_reg->def.index; /* We already handled arrays */ if (num_array_elems == 0) { @@ -1879,7 +1879,7 @@ ntt_shift_by_frac(struct ureg_src src, unsigned frac, unsigned num_components) static void ntt_emit_load_ubo(struct ntt_compile *c, nir_intrinsic_instr *instr) { - int bit_size = instr->dest.ssa.bit_size; + int bit_size = instr->def.bit_size; assert(bit_size == 32 || instr->num_components <= 2); struct ureg_src src = ureg_src_register(TGSI_FILE_CONSTANT, 0); @@ -1923,14 +1923,14 @@ ntt_emit_load_ubo(struct ntt_compile *c, nir_intrinsic_instr *instr) src = ntt_shift_by_frac(src, start_component, instr->num_components * bit_size / 32); - ntt_store(c, &instr->dest.ssa, src); + ntt_store(c, &instr->def, src); } else { /* PIPE_CAP_LOAD_CONSTBUF: Not necessarily vec4 aligned, emit a * TGSI_OPCODE_LOAD instruction from the const file. */ struct ntt_insn *insn = ntt_insn(c, TGSI_OPCODE_LOAD, - ntt_get_dest(c, &instr->dest.ssa), + ntt_get_dest(c, &instr->def), src, ntt_get_src(c, instr->src[1]), ureg_src_undef(), ureg_src_undef()); insn->is_mem = true; @@ -2112,7 +2112,7 @@ ntt_emit_mem(struct ntt_compile *c, nir_intrinsic_instr *instr, write_mask = ntt_64bit_write_mask(write_mask); dst = ureg_writemask(dst, write_mask); } else { - dst = ntt_get_dest(c, &instr->dest.ssa); + dst = ntt_get_dest(c, &instr->def); } struct ntt_insn *insn = ntt_insn(c, opcode, dst, src[0], src[1], src[2], src[3]); @@ -2157,7 +2157,7 @@ ntt_emit_image_load_store(struct ntt_compile *c, nir_intrinsic_instr *instr) dst = ureg_dst(resource); } else { srcs[num_src++] = resource; - dst = ntt_get_dest(c, &instr->dest.ssa); + dst = ntt_get_dest(c, &instr->def); } struct ureg_dst opcode_dst = dst; @@ -2234,7 +2234,7 @@ ntt_emit_load_input(struct ntt_compile *c, nir_intrinsic_instr *instr) unsigned base = nir_intrinsic_base(instr); struct ureg_src input; nir_io_semantics semantics = nir_intrinsic_io_semantics(instr); - bool is_64 = instr->dest.ssa.bit_size == 64; + bool is_64 = instr->def.bit_size == 64; if (c->s->info.stage == MESA_SHADER_VERTEX) { input = ureg_DECL_vs_input(c->ureg, base); @@ -2269,13 +2269,13 @@ ntt_emit_load_input(struct ntt_compile *c, nir_intrinsic_instr *instr) switch (instr->intrinsic) { case nir_intrinsic_load_input: input = ntt_ureg_src_indirect(c, input, instr->src[0], 0); - ntt_store(c, &instr->dest.ssa, input); + ntt_store(c, &instr->def, input); break; case nir_intrinsic_load_per_vertex_input: input = ntt_ureg_src_indirect(c, input, instr->src[1], 0); input = ntt_ureg_src_dimension_indirect(c, input, instr->src[0]); - ntt_store(c, &instr->dest.ssa, input); + ntt_store(c, &instr->def, input); break; case nir_intrinsic_load_interpolated_input: { @@ -2290,7 +2290,7 @@ ntt_emit_load_input(struct ntt_compile *c, nir_intrinsic_instr *instr) /* For these, we know that the barycentric load matches the * interpolation on the input declaration, so we can use it directly. 
*/ - ntt_store(c, &instr->dest.ssa, input); + ntt_store(c, &instr->def, input); break; case nir_intrinsic_load_barycentric_centroid: @@ -2299,21 +2299,21 @@ ntt_emit_load_input(struct ntt_compile *c, nir_intrinsic_instr *instr) * input. */ if (c->centroid_inputs & (1ull << nir_intrinsic_base(instr))) { - ntt_store(c, &instr->dest.ssa, input); + ntt_store(c, &instr->def, input); } else { - ntt_INTERP_CENTROID(c, ntt_get_dest(c, &instr->dest.ssa), input); + ntt_INTERP_CENTROID(c, ntt_get_dest(c, &instr->def), input); } break; case nir_intrinsic_load_barycentric_at_sample: /* We stored the sample in the fake "bary" dest. */ - ntt_INTERP_SAMPLE(c, ntt_get_dest(c, &instr->dest.ssa), input, + ntt_INTERP_SAMPLE(c, ntt_get_dest(c, &instr->def), input, ntt_get_src(c, instr->src[0])); break; case nir_intrinsic_load_barycentric_at_offset: /* We stored the offset in the fake "bary" dest. */ - ntt_INTERP_OFFSET(c, ntt_get_dest(c, &instr->dest.ssa), input, + ntt_INTERP_OFFSET(c, ntt_get_dest(c, &instr->def), input, ntt_get_src(c, instr->src[0])); break; @@ -2383,7 +2383,7 @@ ntt_emit_load_output(struct ntt_compile *c, nir_intrinsic_instr *instr) out = ntt_ureg_dst_indirect(c, out, instr->src[0]); } - struct ureg_dst dst = ntt_get_dest(c, &instr->dest.ssa); + struct ureg_dst dst = ntt_get_dest(c, &instr->def); struct ureg_src out_src = ureg_src(out); /* Don't swizzling unavailable channels of the output in the writemasked-out @@ -2414,7 +2414,7 @@ ntt_emit_load_sysval(struct ntt_compile *c, nir_intrinsic_instr *instr) * aren't defined, even if they aren't really read. (GLSL compile fails on * gl_NumWorkGroups.w, for example). */ - uint32_t write_mask = BITSET_MASK(instr->dest.ssa.num_components); + uint32_t write_mask = BITSET_MASK(instr->def.num_components); sv = ntt_swizzle_for_write_mask(sv, write_mask); /* TGSI and NIR define these intrinsics as always loading ints, but they can @@ -2426,7 +2426,7 @@ ntt_emit_load_sysval(struct ntt_compile *c, nir_intrinsic_instr *instr) switch (instr->intrinsic) { case nir_intrinsic_load_vertex_id: case nir_intrinsic_load_instance_id: - ntt_U2F(c, ntt_get_dest(c, &instr->dest.ssa), sv); + ntt_U2F(c, ntt_get_dest(c, &instr->def), sv); return; default: @@ -2434,7 +2434,7 @@ ntt_emit_load_sysval(struct ntt_compile *c, nir_intrinsic_instr *instr) } } - ntt_store(c, &instr->dest.ssa, sv); + ntt_store(c, &instr->def, sv); } static void @@ -2563,26 +2563,26 @@ ntt_emit_intrinsic(struct ntt_compile *c, nir_intrinsic_instr *instr) } case nir_intrinsic_is_helper_invocation: - ntt_READ_HELPER(c, ntt_get_dest(c, &instr->dest.ssa)); + ntt_READ_HELPER(c, ntt_get_dest(c, &instr->def)); break; case nir_intrinsic_vote_all: - ntt_VOTE_ALL(c, ntt_get_dest(c, &instr->dest.ssa), ntt_get_src(c,instr->src[0])); + ntt_VOTE_ALL(c, ntt_get_dest(c, &instr->def), ntt_get_src(c,instr->src[0])); return; case nir_intrinsic_vote_any: - ntt_VOTE_ANY(c, ntt_get_dest(c, &instr->dest.ssa), ntt_get_src(c, instr->src[0])); + ntt_VOTE_ANY(c, ntt_get_dest(c, &instr->def), ntt_get_src(c, instr->src[0])); return; case nir_intrinsic_vote_ieq: - ntt_VOTE_EQ(c, ntt_get_dest(c, &instr->dest.ssa), ntt_get_src(c, instr->src[0])); + ntt_VOTE_EQ(c, ntt_get_dest(c, &instr->def), ntt_get_src(c, instr->src[0])); return; case nir_intrinsic_ballot: - ntt_BALLOT(c, ntt_get_dest(c, &instr->dest.ssa), ntt_get_src(c, instr->src[0])); + ntt_BALLOT(c, ntt_get_dest(c, &instr->def), ntt_get_src(c, instr->src[0])); return; case nir_intrinsic_read_first_invocation: - ntt_READ_FIRST(c, ntt_get_dest(c, &instr->dest.ssa), 
ntt_get_src(c, instr->src[0])); + ntt_READ_FIRST(c, ntt_get_dest(c, &instr->def), ntt_get_src(c, instr->src[0])); return; case nir_intrinsic_read_invocation: - ntt_READ_INVOC(c, ntt_get_dest(c, &instr->dest.ssa), ntt_get_src(c, instr->src[0]), ntt_get_src(c, instr->src[1])); + ntt_READ_INVOC(c, ntt_get_dest(c, &instr->def), ntt_get_src(c, instr->src[0]), ntt_get_src(c, instr->src[1])); return; case nir_intrinsic_load_ssbo: @@ -2654,11 +2654,11 @@ ntt_emit_intrinsic(struct ntt_compile *c, nir_intrinsic_instr *instr) break; case nir_intrinsic_load_barycentric_at_sample: case nir_intrinsic_load_barycentric_at_offset: - ntt_store(c, &instr->dest.ssa, ntt_get_src(c, instr->src[0])); + ntt_store(c, &instr->def, ntt_get_src(c, instr->src[0])); break; case nir_intrinsic_shader_clock: - ntt_CLOCK(c, ntt_get_dest(c, &instr->dest.ssa)); + ntt_CLOCK(c, ntt_get_dest(c, &instr->def)); break; case nir_intrinsic_decl_reg: @@ -2714,7 +2714,7 @@ ntt_push_tex_arg(struct ntt_compile *c, static void ntt_emit_texture(struct ntt_compile *c, nir_tex_instr *instr) { - struct ureg_dst dst = ntt_get_dest(c, &instr->dest.ssa); + struct ureg_dst dst = ntt_get_dest(c, &instr->def); enum tgsi_texture_type target = tgsi_texture_type_from_sampler_dim(instr->sampler_dim, instr->is_array, instr->is_shadow); unsigned tex_opcode; @@ -3395,7 +3395,7 @@ nir_to_tgsi_lower_64bit_intrinsic(nir_builder *b, nir_intrinsic_instr *instr) bool has_dest = nir_intrinsic_infos[instr->intrinsic].has_dest; if (has_dest) { - if (instr->dest.ssa.bit_size != 64) + if (instr->def.bit_size != 64) return false; } else { if (nir_src_bit_size(instr->src[0]) != 64) @@ -3428,8 +3428,8 @@ nir_to_tgsi_lower_64bit_intrinsic(nir_builder *b, nir_intrinsic_instr *instr) first->num_components = 2; second->num_components -= 2; if (has_dest) { - first->dest.ssa.num_components = 2; - second->dest.ssa.num_components -= 2; + first->def.num_components = 2; + second->def.num_components -= 2; } nir_builder_instr_insert(b, &first->instr); @@ -3438,13 +3438,13 @@ nir_to_tgsi_lower_64bit_intrinsic(nir_builder *b, nir_intrinsic_instr *instr) if (has_dest) { /* Merge the two loads' results back into a vector. */ nir_scalar channels[4] = { - nir_get_ssa_scalar(&first->dest.ssa, 0), - nir_get_ssa_scalar(&first->dest.ssa, 1), - nir_get_ssa_scalar(&second->dest.ssa, 0), - nir_get_ssa_scalar(&second->dest.ssa, second->num_components > 1 ? 1 : 0), + nir_get_ssa_scalar(&first->def, 0), + nir_get_ssa_scalar(&first->def, 1), + nir_get_ssa_scalar(&second->def, 0), + nir_get_ssa_scalar(&second->def, second->num_components > 1 ? 1 : 0), }; nir_def *new = nir_vec_scalars(b, channels, instr->num_components); - nir_def_rewrite_uses(&instr->dest.ssa, new); + nir_def_rewrite_uses(&instr->def, new); } else { /* Split the src value across the two stores. 
*/ b->cursor = nir_before_instr(&instr->instr); @@ -3723,7 +3723,7 @@ ntt_lower_atomic_pre_dec_lower(nir_builder *b, nir_instr *instr, void *_data) { nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr); - nir_def *old_result = &intr->dest.ssa; + nir_def *old_result = &intr->def; intr->intrinsic = nir_intrinsic_atomic_counter_post_dec; return nir_iadd_imm(b, old_result, -1); diff --git a/src/gallium/auxiliary/nir/nir_to_tgsi_info.c b/src/gallium/auxiliary/nir/nir_to_tgsi_info.c index d52853c..90b0416 100644 --- a/src/gallium/auxiliary/nir/nir_to_tgsi_info.c +++ b/src/gallium/auxiliary/nir/nir_to_tgsi_info.c @@ -136,7 +136,7 @@ static void gather_intrinsic_load_deref_info(const nir_shader *nir, assert(var && var->data.mode == nir_var_shader_in); if (nir->info.stage == MESA_SHADER_FRAGMENT) - gather_usage(deref, nir_def_components_read(&instr->dest.ssa), + gather_usage(deref, nir_def_components_read(&instr->def), info->input_usage_mask); switch (nir->info.stage) { diff --git a/src/gallium/auxiliary/nir/tgsi_to_nir.c b/src/gallium/auxiliary/nir/tgsi_to_nir.c index d09412f..b941d37 100644 --- a/src/gallium/auxiliary/nir/tgsi_to_nir.c +++ b/src/gallium/auxiliary/nir/tgsi_to_nir.c @@ -742,10 +742,10 @@ ttn_src_for_file_and_index(struct ttn_compile *c, unsigned file, unsigned index, } load->src[srcn++] = nir_src_for_ssa(offset); - nir_def_init(&load->instr, &load->dest.ssa, 4, 32); + nir_def_init(&load->instr, &load->def, 4, 32); nir_builder_instr_insert(b, &load->instr); - src = nir_src_for_ssa(&load->dest.ssa); + src = nir_src_for_ssa(&load->def); break; } @@ -1290,10 +1290,10 @@ ttn_tex(struct ttn_compile *c, nir_def **src) unsigned src_number = 0; instr->src[src_number] = nir_tex_src_for_ssa(nir_tex_src_texture_deref, - &deref->dest.ssa); + &deref->def); src_number++; instr->src[src_number] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref, - &deref->dest.ssa); + &deref->def); src_number++; instr->src[src_number] = @@ -1401,10 +1401,10 @@ ttn_tex(struct ttn_compile *c, nir_def **src) assert(src_number == num_srcs); assert(src_number == instr->num_srcs); - nir_def_init(&instr->instr, &instr->dest.ssa, + nir_def_init(&instr->instr, &instr->def, nir_tex_instr_dest_size(instr), 32); nir_builder_instr_insert(b, &instr->instr); - return nir_pad_vector_imm_int(b, &instr->dest.ssa, 0, 4); + return nir_pad_vector_imm_int(b, &instr->def, 0, 4); } /* TGSI_OPCODE_TXQ is actually two distinct operations: @@ -1451,24 +1451,24 @@ ttn_txq(struct ttn_compile *c, nir_def **src) nir_deref_instr *deref = nir_build_deref_var(b, var); txs->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_deref, - &deref->dest.ssa); + &deref->def); qlv->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_deref, - &deref->dest.ssa); + &deref->def); /* lod: */ txs->src[1] = nir_tex_src_for_ssa(nir_tex_src_lod, ttn_channel(b, src[0], X)); - nir_def_init(&txs->instr, &txs->dest.ssa, nir_tex_instr_dest_size(txs), 32); + nir_def_init(&txs->instr, &txs->def, nir_tex_instr_dest_size(txs), 32); nir_builder_instr_insert(b, &txs->instr); - nir_def_init(&qlv->instr, &qlv->dest.ssa, 1, 32); + nir_def_init(&qlv->instr, &qlv->def, 1, 32); nir_builder_instr_insert(b, &qlv->instr); return nir_vector_insert_imm(b, - nir_pad_vector_imm_int(b, &txs->dest.ssa, 0, 4), - &qlv->dest.ssa, 3); + nir_pad_vector_imm_int(b, &txs->def, 0, 4), + &qlv->def, 3); } static enum glsl_base_type @@ -1592,7 +1592,7 @@ ttn_mem(struct ttn_compile *c, nir_def **src) nir_intrinsic_set_access(instr, image_deref->var->data.access); - instr->src[0] = 
nir_src_for_ssa(&image_deref->dest.ssa); + instr->src[0] = nir_src_for_ssa(&image_deref->def); instr->src[1] = nir_src_for_ssa(src[addr_src_index]); /* Set the sample argument, which is undefined for single-sample images. */ @@ -1621,9 +1621,9 @@ ttn_mem(struct ttn_compile *c, nir_def **src) if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_LOAD) { - nir_def_init(&instr->instr, &instr->dest.ssa, instr->num_components, 32); + nir_def_init(&instr->instr, &instr->def, instr->num_components, 32); nir_builder_instr_insert(b, &instr->instr); - return nir_pad_vector_imm_int(b, &instr->dest.ssa, 0, 4); + return nir_pad_vector_imm_int(b, &instr->def, 0, 4); } else { nir_builder_instr_insert(b, &instr->instr); return NULL; diff --git a/src/gallium/drivers/asahi/agx_nir_lower_sysvals.c b/src/gallium/drivers/asahi/agx_nir_lower_sysvals.c index 69ec160..a2c72ea 100644 --- a/src/gallium/drivers/asahi/agx_nir_lower_sysvals.c +++ b/src/gallium/drivers/asahi/agx_nir_lower_sysvals.c @@ -142,11 +142,11 @@ lower_sysvals(nir_builder *b, nir_instr *instr, void *data) if (instr->type == nir_instr_type_intrinsic) { nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr); - old = &intr->dest.ssa; + old = &intr->def; replacement = lower_intrinsic(b, intr); } else if (instr->type == nir_instr_type_tex) { nir_tex_instr *tex = nir_instr_as_tex(instr); - old = &tex->dest.ssa; + old = &tex->def; if (tex->op != nir_texop_lod_bias_agx) return false; @@ -183,9 +183,9 @@ record_loads(nir_builder *b, nir_instr *instr, void *data) if (intr->intrinsic != nir_intrinsic_load_preamble) return false; - assert(intr->dest.ssa.bit_size >= 16 && "no 8-bit sysvals"); - unsigned dim = intr->dest.ssa.num_components; - unsigned element_size = intr->dest.ssa.bit_size / 16; + assert(intr->def.bit_size >= 16 && "no 8-bit sysvals"); + unsigned dim = intr->def.num_components; + unsigned element_size = intr->def.bit_size / 16; unsigned length = dim * element_size; struct state *state = data; diff --git a/src/gallium/drivers/asahi/agx_streamout.c b/src/gallium/drivers/asahi/agx_streamout.c index 5de3c24..bf5cb44 100644 --- a/src/gallium/drivers/asahi/agx_streamout.c +++ b/src/gallium/drivers/asahi/agx_streamout.c @@ -438,7 +438,7 @@ lower_xfb_intrinsics(struct nir_builder *b, nir_instr *instr, void *data) /* XXX: Rename to "xfb index" to avoid the clash */ case nir_intrinsic_load_vertex_id_zero_base: { nir_def *id = nir_load_vertex_id(b); - nir_def_rewrite_uses(&intr->dest.ssa, id); + nir_def_rewrite_uses(&intr->def, id); return true; } @@ -533,7 +533,7 @@ lower_xfb_intrinsics(struct nir_builder *b, nir_instr *instr, void *data) id = nir_u2uN(b, index, id->bit_size); } - nir_def_rewrite_uses(&intr->dest.ssa, id); + nir_def_rewrite_uses(&intr->def, id); return true; } diff --git a/src/gallium/drivers/crocus/crocus_program.c b/src/gallium/drivers/crocus/crocus_program.c index ebd9767..cfe68ce 100644 --- a/src/gallium/drivers/crocus/crocus_program.c +++ b/src/gallium/drivers/crocus/crocus_program.c @@ -467,7 +467,7 @@ crocus_setup_uniforms(ASSERTED const struct intel_device_info *devinfo, case nir_intrinsic_load_base_workgroup_id: { /* GL doesn't have a concept of base workgroup */ b.cursor = nir_instr_remove(&intrin->instr); - nir_def_rewrite_uses(&intrin->dest.ssa, + nir_def_rewrite_uses(&intrin->def, nir_imm_zero(&b, 3, 32)); continue; } @@ -491,13 +491,13 @@ crocus_setup_uniforms(ASSERTED const struct intel_device_info *devinfo, nir_intrinsic_set_align(load_ubo, 4, 0); nir_intrinsic_set_range_base(load_ubo, 0); 
nir_intrinsic_set_range(load_ubo, ~0); - nir_def_init(&load_ubo->instr, &load_ubo->dest.ssa, - intrin->dest.ssa.num_components, - intrin->dest.ssa.bit_size); + nir_def_init(&load_ubo->instr, &load_ubo->def, + intrin->def.num_components, + intrin->def.bit_size); nir_builder_instr_insert(&b, &load_ubo->instr); - nir_def_rewrite_uses(&intrin->dest.ssa, - &load_ubo->dest.ssa); + nir_def_rewrite_uses(&intrin->def, + &load_ubo->def); nir_instr_remove(&intrin->instr); continue; } @@ -631,10 +631,10 @@ crocus_setup_uniforms(ASSERTED const struct intel_device_info *devinfo, nir_intrinsic_set_align(load, 4, 0); nir_intrinsic_set_range_base(load, 0); nir_intrinsic_set_range(load, ~0); - nir_def_init(&load->instr, &load->dest.ssa, comps, 32); + nir_def_init(&load->instr, &load->def, comps, 32); nir_builder_instr_insert(&b, &load->instr); - nir_def_rewrite_uses(&intrin->dest.ssa, - &load->dest.ssa); + nir_def_rewrite_uses(&intrin->def, + &load->def); nir_instr_remove(instr); } } @@ -984,13 +984,13 @@ crocus_setup_binding_table(const struct intel_device_info *devinfo, enum gfx6_gather_sampler_wa wa = key->gfx6_gather_wa[tex->texture_index]; int width = (wa & WA_8BIT) ? 8 : 16; - nir_def *val = nir_fmul_imm(&b, &tex->dest.ssa, (1 << width) - 1); + nir_def *val = nir_fmul_imm(&b, &tex->def, (1 << width) - 1); val = nir_f2u32(&b, val); if (wa & WA_SIGN) { val = nir_ishl_imm(&b, val, 32 - width); val = nir_ishr_imm(&b, val, 32 - width); } - nir_def_rewrite_uses_after(&tex->dest.ssa, val, val->parent_instr); + nir_def_rewrite_uses_after(&tex->def, val, val->parent_instr); } tex->texture_index = diff --git a/src/gallium/drivers/d3d12/d3d12_blit.cpp b/src/gallium/drivers/d3d12/d3d12_blit.cpp index 87cd421..142e33a 100644 --- a/src/gallium/drivers/d3d12/d3d12_blit.cpp +++ b/src/gallium/drivers/d3d12/d3d12_blit.cpp @@ -661,7 +661,7 @@ get_stencil_resolve_fs(struct d3d12_context *ctx, bool no_flip) sampler->data.binding = 0; sampler->data.explicit_binding = true; - nir_def *tex_deref = &nir_build_deref_var(&b, sampler)->dest.ssa; + nir_def *tex_deref = &nir_build_deref_var(&b, sampler)->def; nir_variable *pos_in = nir_variable_create(b.shader, nir_var_shader_in, glsl_vec4_type(), "pos"); @@ -680,7 +680,7 @@ get_stencil_resolve_fs(struct d3d12_context *ctx, bool no_flip) txs->is_array = false; txs->dest_type = nir_type_int; - nir_def_init(&txs->instr, &txs->dest.ssa, 2, 32); + nir_def_init(&txs->instr, &txs->def, 2, 32); nir_builder_instr_insert(&b, &txs->instr); pos_src = nir_vec4(&b, @@ -688,7 +688,7 @@ get_stencil_resolve_fs(struct d3d12_context *ctx, bool no_flip) /*Height - pos_dest.y - 1*/ nir_fsub(&b, nir_fsub(&b, - nir_channel(&b, nir_i2f32(&b, &txs->dest.ssa), 1), + nir_channel(&b, nir_i2f32(&b, &txs->def), 1), nir_channel(&b, pos, 1)), nir_imm_float(&b, 1.0)), nir_channel(&b, pos, 2), @@ -706,10 +706,10 @@ get_stencil_resolve_fs(struct d3d12_context *ctx, bool no_flip) tex->is_array = false; tex->coord_components = 2; - nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32); + nir_def_init(&tex->instr, &tex->def, 4, 32); nir_builder_instr_insert(&b, &tex->instr); - nir_store_var(&b, stencil_out, nir_channel(&b, &tex->dest.ssa, 1), 0x1); + nir_store_var(&b, stencil_out, nir_channel(&b, &tex->def, 1), 0x1); struct pipe_shader_state state = {}; state.type = PIPE_SHADER_IR_NIR; diff --git a/src/gallium/drivers/d3d12/d3d12_lower_image_casts.c b/src/gallium/drivers/d3d12/d3d12_lower_image_casts.c index a00fcee..35b82a9 100644 --- a/src/gallium/drivers/d3d12/d3d12_lower_image_casts.c +++ 
b/src/gallium/drivers/d3d12/d3d12_lower_image_casts.c @@ -217,7 +217,7 @@ lower_image_cast_instr(nir_builder *b, nir_instr *instr, void *_data) const struct util_format_description *from_desc, *to_desc; if (intr->intrinsic == nir_intrinsic_image_deref_load) { b->cursor = nir_after_instr(instr); - value = &intr->dest.ssa; + value = &intr->def; from_desc = util_format_description(emulation_format); to_desc = util_format_description(real_format); } else { diff --git a/src/gallium/drivers/d3d12/d3d12_nir_passes.c b/src/gallium/drivers/d3d12/d3d12_nir_passes.c index f137275..27c32b6 100644 --- a/src/gallium/drivers/d3d12/d3d12_nir_passes.c +++ b/src/gallium/drivers/d3d12/d3d12_nir_passes.c @@ -137,7 +137,7 @@ lower_pos_read(nir_builder *b, struct nir_instr *instr, pos = nir_vector_insert_imm(b, pos, depth, 2); - nir_def_rewrite_uses_after(&intr->dest.ssa, pos, + nir_def_rewrite_uses_after(&intr->def, pos, pos->parent_instr); } @@ -183,7 +183,7 @@ lower_compute_state_vars(nir_builder *b, nir_instr *instr, void *_state) return false; } - nir_def_rewrite_uses(&intr->dest.ssa, result); + nir_def_rewrite_uses(&intr->def, result); nir_instr_remove(instr); return true; } @@ -275,7 +275,7 @@ lower_load_draw_params(nir_builder *b, nir_instr *instr, void *draw_params) unsigned channel = intr->intrinsic == nir_intrinsic_load_first_vertex ? 0 : intr->intrinsic == nir_intrinsic_load_base_instance ? 1 : intr->intrinsic == nir_intrinsic_load_draw_id ? 2 : 3; - nir_def_rewrite_uses(&intr->dest.ssa, nir_channel(b, load, channel)); + nir_def_rewrite_uses(&intr->def, nir_channel(b, load, channel)); nir_instr_remove(instr); return true; @@ -305,7 +305,7 @@ lower_load_patch_vertices_in(nir_builder *b, nir_instr *instr, void *_state) nir_def *load = b->shader->info.stage == MESA_SHADER_TESS_CTRL ? 
d3d12_get_state_var(b, D3D12_STATE_VAR_PATCH_VERTICES_IN, "d3d12_FirstVertex", glsl_uint_type(), _state) : nir_imm_int(b, b->shader->info.tess.tcs_vertices_out); - nir_def_rewrite_uses(&intr->dest.ssa, load); + nir_def_rewrite_uses(&intr->def, load); nir_instr_remove(instr); return true; } @@ -482,7 +482,7 @@ lower_instr(nir_intrinsic_instr *instr, nir_builder *b, nir_def *ubo_idx = nir_imm_int(b, binding); nir_def *ubo_offset = nir_imm_int(b, get_state_var_offset(shader, var) * 4); nir_def *load = - nir_load_ubo(b, instr->num_components, instr->dest.ssa.bit_size, + nir_load_ubo(b, instr->num_components, instr->def.bit_size, ubo_idx, ubo_offset, .align_mul = 16, .align_offset = 0, @@ -490,13 +490,13 @@ lower_instr(nir_intrinsic_instr *instr, nir_builder *b, .range = ~0, ); - nir_def_rewrite_uses(&instr->dest.ssa, load); + nir_def_rewrite_uses(&instr->def, load); /* Remove the old load_* instruction and any parent derefs */ nir_instr_remove(&instr->instr); for (nir_deref_instr *d = deref; d; d = nir_deref_instr_parent(d)) { /* If anyone is using this deref, leave it alone */ - if (!list_is_empty(&d->dest.ssa.uses)) + if (!list_is_empty(&d->def.uses)) break; nir_instr_remove(&d->instr); @@ -899,7 +899,7 @@ split_multistream_varying_stores(nir_builder *b, nir_instr *instr, void *_state) first_channel += var_state->subvars[subvar].num_components; unsigned new_write_mask = (orig_write_mask >> first_channel) & mask_num_channels; - nir_build_store_deref(b, &new_path->dest.ssa, sub_value, new_write_mask, nir_intrinsic_access(intr)); + nir_build_store_deref(b, &new_path->def, sub_value, new_write_mask, nir_intrinsic_access(intr)); } nir_deref_path_finish(&path); diff --git a/src/gallium/drivers/etnaviv/etnaviv_compiler_nir.c b/src/gallium/drivers/etnaviv/etnaviv_compiler_nir.c index 09e04c8..cc1d6cf 100644 --- a/src/gallium/drivers/etnaviv/etnaviv_compiler_nir.c +++ b/src/gallium/drivers/etnaviv/etnaviv_compiler_nir.c @@ -523,7 +523,7 @@ static void emit_tex(struct etna_compile *c, nir_tex_instr * tex) { unsigned dst_swiz; - hw_dst dst = ra_def(c, &tex->dest.ssa, &dst_swiz); + hw_dst dst = ra_def(c, &tex->def, &dst_swiz); nir_src *coord = NULL, *src1 = NULL, *src2 = NULL; for (unsigned i = 0; i < tex->num_srcs; i++) { @@ -568,7 +568,7 @@ emit_intrinsic(struct etna_compile *c, nir_intrinsic_instr * intr) break; case nir_intrinsic_load_uniform: { unsigned dst_swiz; - struct etna_inst_dst dst = ra_def(c, &intr->dest.ssa, &dst_swiz); + struct etna_inst_dst dst = ra_def(c, &intr->def, &dst_swiz); /* TODO: rework so extra MOV isn't required, load up to 4 addresses at once */ emit_inst(c, &(struct etna_inst) { @@ -595,7 +595,7 @@ emit_intrinsic(struct etna_compile *c, nir_intrinsic_instr * intr) emit_inst(c, &(struct etna_inst) { .opcode = INST_OPCODE_LOAD, .type = INST_TYPE_U32, - .dst = ra_def(c, &intr->dest.ssa, &dst_swiz), + .dst = ra_def(c, &intr->def, &dst_swiz), .src[0] = get_src(c, &intr->src[1]), .src[1] = const_src(c, &CONST_VAL(ETNA_UNIFORM_UBO0_ADDR + idx, 0), 1), }); @@ -911,7 +911,7 @@ lower_alu(struct etna_compile *c, nir_alu_instr *alu) break; case nir_instr_type_intrinsic: if (nir_instr_as_intrinsic(instr)->intrinsic == nir_intrinsic_load_input) { - need_mov = vec_dest_has_swizzle(alu, &nir_instr_as_intrinsic(instr)->dest.ssa); + need_mov = vec_dest_has_swizzle(alu, &nir_instr_as_intrinsic(instr)->def); break; } FALLTHROUGH; @@ -971,13 +971,13 @@ emit_shader(struct etna_compile *c, unsigned *num_temps, unsigned *num_consts) base += off[0].u32; nir_const_value value[4]; - for (unsigned i = 
0; i < intr->dest.ssa.num_components; i++) + for (unsigned i = 0; i < intr->def.num_components; i++) value[i] = UNIFORM(base * 4 + i); b.cursor = nir_after_instr(instr); - nir_def *def = nir_build_imm(&b, intr->dest.ssa.num_components, 32, value); + nir_def *def = nir_build_imm(&b, intr->def.num_components, 32, value); - nir_def_rewrite_uses(&intr->dest.ssa, def); + nir_def_rewrite_uses(&intr->def, def); nir_instr_remove(instr); } break; default: diff --git a/src/gallium/drivers/etnaviv/etnaviv_compiler_nir.h b/src/gallium/drivers/etnaviv/etnaviv_compiler_nir.h index 2670c2e..ac2feed 100644 --- a/src/gallium/drivers/etnaviv/etnaviv_compiler_nir.h +++ b/src/gallium/drivers/etnaviv/etnaviv_compiler_nir.h @@ -284,7 +284,7 @@ def_for_instr(nir_instr *instr) def = &nir_instr_as_alu(instr)->def; break; case nir_instr_type_tex: - def = &nir_instr_as_tex(instr)->dest.ssa; + def = &nir_instr_as_tex(instr)->def; break; case nir_instr_type_intrinsic: { nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr); @@ -294,7 +294,7 @@ def_for_instr(nir_instr *instr) intr->intrinsic == nir_intrinsic_load_instance_id || intr->intrinsic == nir_intrinsic_load_texture_scale || intr->intrinsic == nir_intrinsic_load_texture_size_etna) - def = &intr->dest.ssa; + def = &intr->def; } break; case nir_instr_type_deref: return NULL; diff --git a/src/gallium/drivers/etnaviv/etnaviv_compiler_nir_ra.c b/src/gallium/drivers/etnaviv/etnaviv_compiler_nir_ra.c index 9c14605..bf04473 100644 --- a/src/gallium/drivers/etnaviv/etnaviv_compiler_nir_ra.c +++ b/src/gallium/drivers/etnaviv/etnaviv_compiler_nir_ra.c @@ -168,7 +168,7 @@ etna_ra_assign(struct etna_compile *c, nir_shader *shader) nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr); /* can't have dst swizzle or sparse writemask on UBO loads */ if (intr->intrinsic == nir_intrinsic_load_ubo) { - assert(def == &intr->dest.ssa); + assert(def == &intr->def); if (def->num_components == 2) comp = REG_CLASS_VIRT_VEC2C; if (def->num_components == 3) diff --git a/src/gallium/drivers/etnaviv/etnaviv_nir.c b/src/gallium/drivers/etnaviv/etnaviv_nir.c index 6d2e76a..72c0452 100644 --- a/src/gallium/drivers/etnaviv/etnaviv_nir.c +++ b/src/gallium/drivers/etnaviv/etnaviv_nir.c @@ -45,15 +45,15 @@ etna_lower_io(nir_shader *shader, struct etna_shader_variant *v) /* HW front_face is 0.0/1.0, not 0/~0u for bool * lower with a comparison with 0 */ - intr->dest.ssa.bit_size = 32; + intr->def.bit_size = 32; b.cursor = nir_after_instr(instr); - nir_def *ssa = nir_ine_imm(&b, &intr->dest.ssa, 0); + nir_def *ssa = nir_ine_imm(&b, &intr->def, 0); if (v->key.front_ccw) nir_instr_as_alu(ssa->parent_instr)->op = nir_op_ieq; - nir_def_rewrite_uses_after(&intr->dest.ssa, + nir_def_rewrite_uses_after(&intr->def, ssa, ssa->parent_instr); } break; diff --git a/src/gallium/drivers/etnaviv/etnaviv_nir_lower_texture.c b/src/gallium/drivers/etnaviv/etnaviv_nir_lower_texture.c index f638860..9da28ba 100644 --- a/src/gallium/drivers/etnaviv/etnaviv_nir_lower_texture.c +++ b/src/gallium/drivers/etnaviv/etnaviv_nir_lower_texture.c @@ -21,7 +21,7 @@ lower_txs(nir_builder *b, nir_instr *instr, UNUSED void *data) nir_def *idx = nir_imm_int(b, tex->texture_index); nir_def *sizes = nir_load_texture_size_etna(b, 32, idx); - nir_def_rewrite_uses(&tex->dest.ssa, sizes); + nir_def_rewrite_uses(&tex->def, sizes); return true; } diff --git a/src/gallium/drivers/etnaviv/etnaviv_nir_lower_ubo_to_uniform.c b/src/gallium/drivers/etnaviv/etnaviv_nir_lower_ubo_to_uniform.c index 7ef827f..13e571b 100644 --- 
a/src/gallium/drivers/etnaviv/etnaviv_nir_lower_ubo_to_uniform.c +++ b/src/gallium/drivers/etnaviv/etnaviv_nir_lower_ubo_to_uniform.c @@ -64,12 +64,12 @@ lower_ubo_to_uniform(nir_builder *b, nir_instr *instr, void *_data) nir_ushr_imm(b, nir_isub(b, ubo_offset, range_base), 4); nir_def *uniform = - nir_load_uniform(b, intr->num_components, intr->dest.ssa.bit_size, uniform_offset, + nir_load_uniform(b, intr->num_components, intr->def.bit_size, uniform_offset, .base = nir_intrinsic_range_base(intr) / 16, .range = nir_intrinsic_range(intr) / 16, .dest_type = nir_type_float32); - nir_def_rewrite_uses(&intr->dest.ssa, uniform); + nir_def_rewrite_uses(&intr->def, uniform); return uniform; } diff --git a/src/gallium/drivers/freedreno/a2xx/ir2_nir.c b/src/gallium/drivers/freedreno/a2xx/ir2_nir.c index 976f39d..1f5209c 100644 --- a/src/gallium/drivers/freedreno/a2xx/ir2_nir.c +++ b/src/gallium/drivers/freedreno/a2xx/ir2_nir.c @@ -630,7 +630,7 @@ emit_intrinsic(struct ir2_context *ctx, nir_intrinsic_instr *intr) break; case nir_intrinsic_load_input: - load_input(ctx, &intr->dest.ssa, nir_intrinsic_base(intr)); + load_input(ctx, &intr->def, nir_intrinsic_base(intr)); break; case nir_intrinsic_store_output: store_output(ctx, intr->src[0], output_slot(ctx, intr), @@ -641,7 +641,7 @@ emit_intrinsic(struct ir2_context *ctx, nir_intrinsic_instr *intr) assert(const_offset); /* TODO can be false in ES2? */ idx = nir_intrinsic_base(intr); idx += (uint32_t)const_offset[0].f32; - instr = instr_create_alu_dest(ctx, nir_op_mov, &intr->dest.ssa); + instr = instr_create_alu_dest(ctx, nir_op_mov, &intr->def); instr->src[0] = ir2_src(idx, 0, IR2_SRC_CONST); break; case nir_intrinsic_discard: @@ -668,7 +668,7 @@ emit_intrinsic(struct ir2_context *ctx, nir_intrinsic_instr *intr) struct ir2_instr *tmp = instr_create_alu(ctx, nir_op_frcp, 1); tmp->src[0] = ir2_src(ctx->f->inputs_count, 0, IR2_SRC_INPUT); - instr = instr_create_alu_dest(ctx, nir_op_sge, &intr->dest.ssa); + instr = instr_create_alu_dest(ctx, nir_op_sge, &intr->def); instr->src[0] = ir2_src(tmp->idx, 0, IR2_SRC_SSA); instr->src[1] = ir2_zero(ctx); break; @@ -676,7 +676,7 @@ emit_intrinsic(struct ir2_context *ctx, nir_intrinsic_instr *intr) /* param.zw (note: abs might be needed like fragcoord in param.xy?) */ ctx->so->need_param = true; - instr = instr_create_alu_dest(ctx, nir_op_mov, &intr->dest.ssa); + instr = instr_create_alu_dest(ctx, nir_op_mov, &intr->def); instr->src[0] = ir2_src(ctx->f->inputs_count, IR2_SWIZZLE_ZW, IR2_SRC_INPUT); break; @@ -769,7 +769,7 @@ emit_tex(struct ir2_context *ctx, nir_tex_instr *tex) /* TODO: lod/bias transformed by src_coord.z ? */ } - instr = ir2_instr_create_fetch(ctx, &tex->dest.ssa, TEX_FETCH); + instr = ir2_instr_create_fetch(ctx, &tex->def, TEX_FETCH); instr->src[0] = src_coord; instr->src[0].swizzle = is_cube ? 
IR2_SWIZZLE_YXW : 0; instr->fetch.tex.is_cube = is_cube; @@ -1194,9 +1194,9 @@ ir2_nir_compile(struct ir2_context *ctx, bool binning) nir_function_impl *fxn = nir_shader_get_entrypoint(ctx->nir); nir_foreach_reg_decl (decl, fxn) { - assert(decl->dest.ssa.index < ARRAY_SIZE(ctx->reg)); - ctx->reg[decl->dest.ssa.index].ncomp = nir_intrinsic_num_components(decl); - ctx->reg_count = MAX2(ctx->reg_count, decl->dest.ssa.index + 1); + assert(decl->def.index < ARRAY_SIZE(ctx->reg)); + ctx->reg[decl->def.index].ncomp = nir_intrinsic_num_components(decl); + ctx->reg_count = MAX2(ctx->reg_count, decl->def.index + 1); } nir_metadata_require(fxn, nir_metadata_block_index); diff --git a/src/gallium/drivers/iris/iris_program.c b/src/gallium/drivers/iris/iris_program.c index f323628..fb1ee90 100644 --- a/src/gallium/drivers/iris/iris_program.c +++ b/src/gallium/drivers/iris/iris_program.c @@ -479,14 +479,14 @@ iris_setup_uniforms(ASSERTED const struct intel_device_info *devinfo, case nir_intrinsic_load_base_workgroup_id: { /* GL doesn't have a concept of base workgroup */ b.cursor = nir_instr_remove(&intrin->instr); - nir_def_rewrite_uses(&intrin->dest.ssa, + nir_def_rewrite_uses(&intrin->def, nir_imm_zero(&b, 3, 32)); continue; } case nir_intrinsic_load_constant: { - unsigned load_size = intrin->dest.ssa.num_components * - intrin->dest.ssa.bit_size / 8; - unsigned load_align = intrin->dest.ssa.bit_size / 8; + unsigned load_size = intrin->def.num_components * + intrin->def.bit_size / 8; + unsigned load_align = intrin->def.bit_size / 8; /* This one is special because it reads from the shader constant * data and not cbuf0 which gallium uploads for us. @@ -514,10 +514,10 @@ iris_setup_uniforms(ASSERTED const struct intel_device_info *devinfo, nir_def *data = nir_load_global_constant(&b, nir_u2u64(&b, const_data_addr), load_align, - intrin->dest.ssa.num_components, - intrin->dest.ssa.bit_size); + intrin->def.num_components, + intrin->def.bit_size); - nir_def_rewrite_uses(&intrin->dest.ssa, + nir_def_rewrite_uses(&intrin->def, data); continue; } @@ -663,14 +663,14 @@ iris_setup_uniforms(ASSERTED const struct intel_device_info *devinfo, } nir_def *load = - nir_load_ubo(&b, intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size, + nir_load_ubo(&b, intrin->def.num_components, intrin->def.bit_size, temp_ubo_name, offset, .align_mul = 4, .align_offset = 0, .range_base = 0, .range = ~0); - nir_def_rewrite_uses(&intrin->dest.ssa, + nir_def_rewrite_uses(&intrin->def, load); nir_instr_remove(instr); } diff --git a/src/gallium/drivers/lima/ir/gp/nir.c b/src/gallium/drivers/lima/ir/gp/nir.c index 3ea8904..f85e356 100644 --- a/src/gallium/drivers/lima/ir/gp/nir.c +++ b/src/gallium/drivers/lima/ir/gp/nir.c @@ -228,14 +228,14 @@ static bool gpir_emit_intrinsic(gpir_block *block, nir_instr *ni) case nir_intrinsic_decl_reg: { gpir_reg *reg = gpir_create_reg(block->comp); - block->comp->reg_for_ssa[instr->dest.ssa.index] = reg; + block->comp->reg_for_ssa[instr->def.index] = reg; return true; } case nir_intrinsic_load_reg: { gpir_node *node = gpir_node_find(block, &instr->src[0], 0); assert(node); - block->comp->node_for_ssa[instr->dest.ssa.index] = node; + block->comp->node_for_ssa[instr->def.index] = node; return true; } case nir_intrinsic_store_reg: @@ -246,7 +246,7 @@ static bool gpir_emit_intrinsic(gpir_block *block, nir_instr *ni) return true; } case nir_intrinsic_load_input: - return gpir_create_load(block, &instr->dest.ssa, + return gpir_create_load(block, &instr->def, gpir_op_load_attribute, 
nir_intrinsic_base(instr), nir_intrinsic_component(instr)) != NULL; @@ -255,14 +255,14 @@ static bool gpir_emit_intrinsic(gpir_block *block, nir_instr *ni) int offset = nir_intrinsic_base(instr); offset += (int)nir_src_as_float(instr->src[0]); - return gpir_create_load(block, &instr->dest.ssa, + return gpir_create_load(block, &instr->def, gpir_op_load_uniform, offset / 4, offset % 4) != NULL; } case nir_intrinsic_load_viewport_scale: - return gpir_create_vector_load(block, &instr->dest.ssa, GPIR_VECTOR_SSA_VIEWPORT_SCALE); + return gpir_create_vector_load(block, &instr->def, GPIR_VECTOR_SSA_VIEWPORT_SCALE); case nir_intrinsic_load_viewport_offset: - return gpir_create_vector_load(block, &instr->dest.ssa, GPIR_VECTOR_SSA_VIEWPORT_OFFSET); + return gpir_create_vector_load(block, &instr->def, GPIR_VECTOR_SSA_VIEWPORT_OFFSET); case nir_intrinsic_store_output: { gpir_store_node *store = gpir_node_create(block, gpir_op_store_varying); diff --git a/src/gallium/drivers/lima/ir/lima_nir_duplicate_intrinsic.c b/src/gallium/drivers/lima/ir/lima_nir_duplicate_intrinsic.c index 4f20bc6..c3c5cc2 100644 --- a/src/gallium/drivers/lima/ir/lima_nir_duplicate_intrinsic.c +++ b/src/gallium/drivers/lima/ir/lima_nir_duplicate_intrinsic.c @@ -32,7 +32,7 @@ lima_nir_duplicate_intrinsic(nir_builder *b, nir_intrinsic_instr *itr, nir_intrinsic_instr *last_dupl = NULL; nir_instr *last_parent_instr = NULL; - nir_foreach_use_safe(use_src, &itr->dest.ssa) { + nir_foreach_use_safe(use_src, &itr->def) { nir_intrinsic_instr *dupl; if (last_parent_instr != use_src->parent_instr) { @@ -43,8 +43,8 @@ lima_nir_duplicate_intrinsic(nir_builder *b, nir_intrinsic_instr *itr, memcpy(dupl->const_index, itr->const_index, sizeof(itr->const_index)); dupl->src[0].ssa = itr->src[0].ssa; - nir_def_init(&dupl->instr, &dupl->dest.ssa, dupl->num_components, - itr->dest.ssa.bit_size); + nir_def_init(&dupl->instr, &dupl->def, dupl->num_components, + itr->def.bit_size); dupl->instr.pass_flags = 1; nir_builder_instr_insert(b, &dupl->instr); @@ -53,7 +53,7 @@ lima_nir_duplicate_intrinsic(nir_builder *b, nir_intrinsic_instr *itr, dupl = last_dupl; } - nir_instr_rewrite_src(use_src->parent_instr, use_src, nir_src_for_ssa(&dupl->dest.ssa)); + nir_instr_rewrite_src(use_src->parent_instr, use_src, nir_src_for_ssa(&dupl->def)); last_parent_instr = use_src->parent_instr; last_dupl = dupl; } @@ -61,7 +61,7 @@ lima_nir_duplicate_intrinsic(nir_builder *b, nir_intrinsic_instr *itr, last_dupl = NULL; last_parent_instr = NULL; - nir_foreach_if_use_safe(use_src, &itr->dest.ssa) { + nir_foreach_if_use_safe(use_src, &itr->def) { nir_intrinsic_instr *dupl; if (last_parent_instr != use_src->parent_instr) { @@ -72,8 +72,8 @@ lima_nir_duplicate_intrinsic(nir_builder *b, nir_intrinsic_instr *itr, memcpy(dupl->const_index, itr->const_index, sizeof(itr->const_index)); dupl->src[0].ssa = itr->src[0].ssa; - nir_def_init(&dupl->instr, &dupl->dest.ssa, dupl->num_components, - itr->dest.ssa.bit_size); + nir_def_init(&dupl->instr, &dupl->def, dupl->num_components, + itr->def.bit_size); dupl->instr.pass_flags = 1; nir_builder_instr_insert(b, &dupl->instr); @@ -82,7 +82,7 @@ lima_nir_duplicate_intrinsic(nir_builder *b, nir_intrinsic_instr *itr, dupl = last_dupl; } - nir_if_rewrite_condition(use_src->parent_if, nir_src_for_ssa(&dupl->dest.ssa)); + nir_if_rewrite_condition(use_src->parent_if, nir_src_for_ssa(&dupl->def)); last_parent_instr = use_src->parent_instr; last_dupl = dupl; } diff --git a/src/gallium/drivers/lima/ir/lima_nir_lower_txp.c 
b/src/gallium/drivers/lima/ir/lima_nir_lower_txp.c index aab32a8..3ba1dfe 100644 --- a/src/gallium/drivers/lima/ir/lima_nir_lower_txp.c +++ b/src/gallium/drivers/lima/ir/lima_nir_lower_txp.c @@ -54,7 +54,7 @@ get_proj_index(nir_instr *coord_instr, nir_instr *proj_instr, if (intrin->intrinsic != nir_intrinsic_load_input) return NULL; - if (intrin->dest.ssa.num_components != 4) + if (intrin->def.num_components != 4) return NULL; /* Coords must be in .xyz */ diff --git a/src/gallium/drivers/lima/ir/lima_nir_lower_uniform_to_scalar.c b/src/gallium/drivers/lima/ir/lima_nir_lower_uniform_to_scalar.c index 61bb162..4067746 100644 --- a/src/gallium/drivers/lima/ir/lima_nir_lower_uniform_to_scalar.c +++ b/src/gallium/drivers/lima/ir/lima_nir_lower_uniform_to_scalar.c @@ -35,8 +35,8 @@ lower_load_uniform_to_scalar(nir_builder *b, nir_intrinsic_instr *intr) for (unsigned i = 0; i < intr->num_components; i++) { nir_intrinsic_instr *chan_intr = nir_intrinsic_instr_create(b->shader, intr->intrinsic); - nir_def_init(&chan_intr->instr, &chan_intr->dest.ssa, 1, - intr->dest.ssa.bit_size); + nir_def_init(&chan_intr->instr, &chan_intr->def, 1, + intr->def.bit_size); chan_intr->num_components = 1; nir_intrinsic_set_base(chan_intr, nir_intrinsic_base(intr) * 4 + i); @@ -48,10 +48,10 @@ lower_load_uniform_to_scalar(nir_builder *b, nir_intrinsic_instr *intr) nir_builder_instr_insert(b, &chan_intr->instr); - loads[i] = &chan_intr->dest.ssa; + loads[i] = &chan_intr->def; } - nir_def_rewrite_uses(&intr->dest.ssa, + nir_def_rewrite_uses(&intr->def, nir_vec(b, loads, intr->num_components)); nir_instr_remove(&intr->instr); } diff --git a/src/gallium/drivers/lima/ir/lima_nir_split_load_input.c b/src/gallium/drivers/lima/ir/lima_nir_split_load_input.c index c0bb785..4d299c3 100644 --- a/src/gallium/drivers/lima/ir/lima_nir_split_load_input.c +++ b/src/gallium/drivers/lima/ir/lima_nir_split_load_input.c @@ -69,7 +69,7 @@ lima_nir_split_load_input_instr(nir_builder *b, nir_intrinsic_instr *new_intrin = nir_intrinsic_instr_create( b->shader, intrin->intrinsic); - nir_def_init(&new_intrin->instr, &new_intrin->dest.ssa, + nir_def_init(&new_intrin->instr, &new_intrin->def, alu->def.num_components, ssa->bit_size); new_intrin->num_components = alu->def.num_components; nir_intrinsic_set_base(new_intrin, nir_intrinsic_base(intrin)); @@ -81,7 +81,7 @@ lima_nir_split_load_input_instr(nir_builder *b, nir_builder_instr_insert(b, &new_intrin->instr); nir_def_rewrite_uses(&alu->def, - &new_intrin->dest.ssa); + &new_intrin->def); nir_instr_remove(&alu->instr); return true; } diff --git a/src/gallium/drivers/lima/ir/lima_nir_split_loads.c b/src/gallium/drivers/lima/ir/lima_nir_split_loads.c index c347720..0d1e328 100644 --- a/src/gallium/drivers/lima/ir/lima_nir_split_loads.c +++ b/src/gallium/drivers/lima/ir/lima_nir_split_loads.c @@ -44,7 +44,7 @@ clone_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin) nir_builder_instr_insert(b, &new_intrin->instr); - return &new_intrin->dest.ssa; + return &new_intrin->def; } static bool @@ -59,7 +59,7 @@ replace_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin) struct hash_table *visited_instrs = _mesa_pointer_hash_table_create(NULL); - nir_foreach_use_safe(src, &intrin->dest.ssa) { + nir_foreach_use_safe(src, &intrin->def) { struct hash_entry *entry = _mesa_hash_table_search(visited_instrs, src->parent_instr); if (entry && (src->parent_instr->type != nir_instr_type_phi)) { @@ -72,7 +72,7 @@ replace_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin) 
nir_instr_rewrite_src(src->parent_instr, src, nir_src_for_ssa(new)); _mesa_hash_table_insert(visited_instrs, src->parent_instr, new); } - nir_foreach_if_use_safe(src, &intrin->dest.ssa) { + nir_foreach_if_use_safe(src, &intrin->def) { b->cursor = nir_before_src(src); nir_if_rewrite_condition(src->parent_if, nir_src_for_ssa(clone_intrinsic(b, intrin))); diff --git a/src/gallium/drivers/lima/ir/pp/nir.c b/src/gallium/drivers/lima/ir/pp/nir.c index f7f7a69..591ff43 100644 --- a/src/gallium/drivers/lima/ir/pp/nir.c +++ b/src/gallium/drivers/lima/ir/pp/nir.c @@ -175,7 +175,7 @@ static bool ppir_emit_alu(ppir_block *block, nir_instr *ni) /* Skip folded fabs/fneg since we do not have dead code elimination */ if ((instr->op == nir_op_fabs || instr->op == nir_op_fneg) && nir_legacy_float_mod_folds(instr)) { - /* Add parent node as a the folded dest ssa node to keep + /* Add parent node as a the folded def node to keep * the dependency chain */ nir_alu_src *ns = &instr->src[0]; ppir_node *parent = block->comp->var_nodes[ns->src.ssa->index]; @@ -291,7 +291,7 @@ static bool ppir_emit_intrinsic(ppir_block *block, nir_instr *ni) return true; case nir_intrinsic_load_reg: { - nir_legacy_dest legacy_dest = nir_legacy_chase_dest(&instr->dest.ssa); + nir_legacy_dest legacy_dest = nir_legacy_chase_dest(&instr->def); lnode = ppir_node_create_dest(block, ppir_op_dummy, &legacy_dest, mask); return true; } @@ -299,7 +299,7 @@ static bool ppir_emit_intrinsic(ppir_block *block, nir_instr *ni) case nir_intrinsic_load_input: { mask = u_bit_consecutive(0, instr->num_components); - nir_legacy_dest legacy_dest = nir_legacy_chase_dest(&instr->dest.ssa); + nir_legacy_dest legacy_dest = nir_legacy_chase_dest(&instr->def); lnode = ppir_node_create_dest(block, ppir_op_load_varying, &legacy_dest, mask); if (!lnode) return false; @@ -338,7 +338,7 @@ static bool ppir_emit_intrinsic(ppir_block *block, nir_instr *ni) break; } - nir_legacy_dest legacy_dest = nir_legacy_chase_dest(&instr->dest.ssa); + nir_legacy_dest legacy_dest = nir_legacy_chase_dest(&instr->def); lnode = ppir_node_create_dest(block, op, &legacy_dest, mask); if (!lnode) return false; @@ -351,7 +351,7 @@ static bool ppir_emit_intrinsic(ppir_block *block, nir_instr *ni) case nir_intrinsic_load_uniform: { mask = u_bit_consecutive(0, instr->num_components); - nir_legacy_dest legacy_dest = nir_legacy_chase_dest(&instr->dest.ssa); + nir_legacy_dest legacy_dest = nir_legacy_chase_dest(&instr->def); lnode = ppir_node_create_dest(block, ppir_op_load_uniform, &legacy_dest, mask); if (!lnode) return false; @@ -517,7 +517,7 @@ static bool ppir_emit_tex(ppir_block *block, nir_instr *ni) unsigned mask = 0; mask = u_bit_consecutive(0, nir_tex_instr_dest_size(instr)); - nir_legacy_dest legacy_dest = nir_legacy_chase_dest(&instr->dest.ssa); + nir_legacy_dest legacy_dest = nir_legacy_chase_dest(&instr->def); node = ppir_node_create_dest(block, ppir_op_load_texture, &legacy_dest, mask); if (!node) return false; @@ -991,7 +991,7 @@ bool ppir_compile_nir(struct lima_fs_compiled_shader *prog, struct nir_shader *n if (!r) return false; - r->index = decl->dest.ssa.index; + r->index = decl->def.index; r->num_components = nir_intrinsic_num_components(decl); r->is_head = false; list_addtail(&r->list, &comp->reg_list); diff --git a/src/gallium/drivers/panfrost/pan_nir_lower_sysvals.c b/src/gallium/drivers/panfrost/pan_nir_lower_sysvals.c index c42bcc1..da96cb5 100644 --- a/src/gallium/drivers/panfrost/pan_nir_lower_sysvals.c +++ b/src/gallium/drivers/panfrost/pan_nir_lower_sysvals.c @@ 
diff --git a/src/gallium/drivers/panfrost/pan_nir_lower_sysvals.c b/src/gallium/drivers/panfrost/pan_nir_lower_sysvals.c
index c42bcc1..da96cb5 100644
--- a/src/gallium/drivers/panfrost/pan_nir_lower_sysvals.c
+++ b/src/gallium/drivers/panfrost/pan_nir_lower_sysvals.c
@@ -138,14 +138,14 @@ lower(nir_builder *b, nir_instr *instr, void *data)
    if (instr->type == nir_instr_type_intrinsic) {
       nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
-      old = &intr->dest.ssa;
+      old = &intr->def;
       sysval = sysval_for_intrinsic(intr, &offset);
 
       if (sysval == ~0)
          return false;
    } else if (instr->type == nir_instr_type_tex) {
       nir_tex_instr *tex = nir_instr_as_tex(instr);
-      old = &tex->dest.ssa;
+      old = &tex->def;
 
       if (tex->op != nir_texop_txs)
          return false;
diff --git a/src/gallium/drivers/r600/sfn/sfn_instr_mem.cpp b/src/gallium/drivers/r600/sfn/sfn_instr_mem.cpp
index d13cb45..395a4c2 100644
--- a/src/gallium/drivers/r600/sfn/sfn_instr_mem.cpp
+++ b/src/gallium/drivers/r600/sfn/sfn_instr_mem.cpp
@@ -192,7 +192,7 @@ bool
 GDSInstr::emit_atomic_op2(nir_intrinsic_instr *instr, Shader& shader)
 {
    auto& vf = shader.value_factory();
-   bool read_result = !list_is_empty(&instr->dest.ssa.uses);
+   bool read_result = !list_is_empty(&instr->def.uses);
 
    ESDOp op = read_result ? get_opcode(instr->intrinsic) : get_opcode_wo(instr->intrinsic);
 
@@ -205,7 +205,7 @@ GDSInstr::emit_atomic_op2(nir_intrinsic_instr *instr, Shader& shader)
    }
    offset += nir_intrinsic_base(instr);
 
-   auto dest = read_result ? vf.dest(instr->dest.ssa, 0, pin_free) : nullptr;
+   auto dest = read_result ? vf.dest(instr->def, 0, pin_free) : nullptr;
 
    PRegister src_as_register = nullptr;
    auto src_val = vf.src(instr->src[1], 0);
@@ -226,7 +226,7 @@ GDSInstr::emit_atomic_op2(nir_intrinsic_instr *instr, Shader& shader)
       ir = new GDSInstr(op, dest, src, offset, uav_id);
 
    } else {
-      auto dest = vf.dest(instr->dest.ssa, 0, pin_free);
+      auto dest = vf.dest(instr->def, 0, pin_free);
       auto tmp = vf.temp_vec4(pin_group, {0, 1, 7, 7});
       if (uav_id)
          shader.emit_instruction(new AluInstr(op3_muladd_uint24,
@@ -256,7 +256,7 @@ GDSInstr::emit_atomic_read(nir_intrinsic_instr *instr, Shader& shader)
    }
    offset += shader.remap_atomic_base(nir_intrinsic_base(instr));
 
-   auto dest = vf.dest(instr->dest.ssa, 0, pin_free);
+   auto dest = vf.dest(instr->def, 0, pin_free);
 
    GDSInstr *ir = nullptr;
 
@@ -287,7 +287,7 @@ bool
 GDSInstr::emit_atomic_inc(nir_intrinsic_instr *instr, Shader& shader)
 {
    auto& vf = shader.value_factory();
-   bool read_result = !list_is_empty(&instr->dest.ssa.uses);
+   bool read_result = !list_is_empty(&instr->def.uses);
 
    auto [offset, uav_id] = shader.evaluate_resource_offset(instr, 0);
    {
@@ -295,7 +295,7 @@ GDSInstr::emit_atomic_inc(nir_intrinsic_instr *instr, Shader& shader)
       offset += shader.remap_atomic_base(nir_intrinsic_base(instr));
 
    GDSInstr *ir = nullptr;
-   auto dest = read_result ? vf.dest(instr->dest.ssa, 0, pin_free) : nullptr;
+   auto dest = read_result ? vf.dest(instr->def, 0, pin_free) : nullptr;
 
    if (shader.chip_class() < ISA_CC_CAYMAN) {
       RegisterVec4 src(nullptr, shader.atomic_update(), nullptr, nullptr, pin_chan);
@@ -328,7 +328,7 @@ GDSInstr::emit_atomic_pre_dec(nir_intrinsic_instr *instr, Shader& shader)
 {
    auto& vf = shader.value_factory();
 
-   bool read_result = !list_is_empty(&instr->dest.ssa.uses);
+   bool read_result = !list_is_empty(&instr->def.uses);
 
    auto opcode = read_result ? DS_OP_SUB_RET : DS_OP_SUB;
@@ -366,7 +366,7 @@ GDSInstr::emit_atomic_pre_dec(nir_intrinsic_instr *instr, Shader& shader)
    shader.emit_instruction(ir);
 
    if (read_result)
       shader.emit_instruction(new AluInstr(op2_sub_int,
-                                           vf.dest(instr->dest.ssa, 0, pin_free),
+                                           vf.dest(instr->def, 0, pin_free),
                                            tmp_dest,
                                            vf.one_i(),
                                            AluInstr::last_write));
@@ -534,7 +534,7 @@ bool
 RatInstr::emit_ssbo_load(nir_intrinsic_instr *intr, Shader& shader)
 {
    auto& vf = shader.value_factory();
-   auto dest = vf.dest_vec4(intr->dest.ssa, pin_group);
+   auto dest = vf.dest_vec4(intr->def, pin_group);
 
    /** src0 not used, should be some offset */
    auto addr = vf.src(intr->src[1], 0);
@@ -553,7 +553,7 @@ RatInstr::emit_ssbo_load(nir_intrinsic_instr *intr, Shader& shader)
       {0, 1, 2, 3}
    };
 
-   int comp_idx = intr->dest.ssa.num_components - 1;
+   int comp_idx = intr->def.num_components - 1;
 
    auto [offset, res_offset] = shader.evaluate_resource_offset(intr, 0);
    {
@@ -663,7 +663,7 @@ RatInstr::emit_ssbo_atomic_op(nir_intrinsic_instr *intr, Shader& shader)
    {
    }
 
-   bool read_result = !list_is_empty(&intr->dest.ssa.uses);
+   bool read_result = !list_is_empty(&intr->def.uses);
    auto opcode = read_result ? get_rat_opcode(nir_intrinsic_atomic_op(intr))
                              : get_rat_opcode_wo(nir_intrinsic_atomic_op(intr));
 
@@ -707,7 +707,7 @@ RatInstr::emit_ssbo_atomic_op(nir_intrinsic_instr *intr, Shader& shader)
    atomic->set_ack();
    if (read_result) {
       atomic->set_instr_flag(ack_rat_return_write);
-      auto dest = vf.dest_vec4(intr->dest.ssa, pin_group);
+      auto dest = vf.dest_vec4(intr->def, pin_group);
 
       auto fetch = new FetchInstr(vc_fetch,
                                   dest,
@@ -737,7 +737,7 @@ bool
 RatInstr::emit_ssbo_size(nir_intrinsic_instr *intr, Shader& shader)
 {
    auto& vf = shader.value_factory();
-   auto dest = vf.dest_vec4(intr->dest.ssa, pin_group);
+   auto dest = vf.dest_vec4(intr->def, pin_group);
 
    auto const_offset = nir_src_as_const_value(intr->src[0]);
    int res_id = R600_IMAGE_REAL_RESOURCE_OFFSET;
@@ -800,7 +800,7 @@ RatInstr::emit_image_load_or_atomic(nir_intrinsic_instr *intrin, Shader& shader)
    {
    }
 
-   bool read_result = !list_is_empty(&intrin->dest.ssa.uses);
+   bool read_result = !list_is_empty(&intrin->def.uses);
    bool image_load = (intrin->intrinsic == nir_intrinsic_image_load);
    auto opcode = image_load ? RatInstr::NOP_RTN
                 : read_result ? get_rat_opcode(nir_intrinsic_atomic_op(intrin))
@@ -847,7 +847,7 @@ RatInstr::emit_image_load_or_atomic(nir_intrinsic_instr *intrin, Shader& shader)
    atomic->set_ack();
    if (read_result) {
       atomic->set_instr_flag(ack_rat_return_write);
-      auto dest = vf.dest_vec4(intrin->dest.ssa, pin_group);
+      auto dest = vf.dest_vec4(intrin->def, pin_group);
 
       pipe_format format = nir_intrinsic_format(intrin);
       unsigned fmt = fmt_32;
@@ -903,17 +903,17 @@ RatInstr::emit_image_size(nir_intrinsic_instr *intrin, Shader& shader)
       dyn_offset = shader.emit_load_to_register(vf.src(intrin->src[0], 0));
 
    if (nir_intrinsic_image_dim(intrin) == GLSL_SAMPLER_DIM_BUF) {
-      auto dest = vf.dest_vec4(intrin->dest.ssa, pin_group);
+      auto dest = vf.dest_vec4(intrin->def, pin_group);
       shader.emit_instruction(new QueryBufferSizeInstr(dest, {0, 1, 2, 3}, res_id));
       return true;
    } else {
 
       if (nir_intrinsic_image_dim(intrin) == GLSL_SAMPLER_DIM_CUBE &&
           nir_intrinsic_image_array(intrin) &&
-          intrin->dest.ssa.num_components > 2) {
+          intrin->def.num_components > 2) {
          /* Need to load the layers from a const buffer */
 
-         auto dest = vf.dest_vec4(intrin->dest.ssa, pin_group);
+         auto dest = vf.dest_vec4(intrin->def, pin_group);
          shader.emit_instruction(new TexInstr(TexInstr::get_resinfo,
                                               dest,
                                               {0, 1, 7, 3},
@@ -977,7 +977,7 @@ RatInstr::emit_image_size(nir_intrinsic_instr *intrin, Shader& shader)
             op3_cnde_int, dest[2], low_bit, comp1, comp2, AluInstr::last_write));
       }
    } else {
-      auto dest = vf.dest_vec4(intrin->dest.ssa, pin_group);
+      auto dest = vf.dest_vec4(intrin->def, pin_group);
       shader.emit_instruction(new TexInstr(TexInstr::get_resinfo,
                                            dest,
                                            {0, 1, 2, 3},
@@ -998,7 +998,7 @@ RatInstr::emit_image_samples(nir_intrinsic_instr *intrin, Shader& shader)
    auto src = RegisterVec4(0, true, {4, 4, 4, 4});
 
    auto tmp = shader.value_factory().temp_vec4(pin_group);
-   auto dest = shader.value_factory().dest(intrin->dest.ssa, 0, pin_free);
+   auto dest = shader.value_factory().dest(intrin->def, 0, pin_free);
 
    auto const_offset = nir_src_as_const_value(intrin->src[0]);
    PRegister dyn_offset = nullptr;
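Note (illustration, not part of the patch): the r600 hunks above pick between value-returning and write-only atomic opcodes by asking whether anything reads the result. With nir_dest gone, the SSA use list hangs directly off the nir_def, so the check is a single list query:

    /* Sketch: true if any nir_src still references the instruction's def. */
    static bool
    atomic_result_is_used(const nir_intrinsic_instr *instr)
    {
       return !list_is_empty(&instr->def.uses);
    }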
diff --git a/src/gallium/drivers/r600/sfn/sfn_instr_tex.cpp b/src/gallium/drivers/r600/sfn/sfn_instr_tex.cpp
index 05576b4..c313e72 100644
--- a/src/gallium/drivers/r600/sfn/sfn_instr_tex.cpp
+++ b/src/gallium/drivers/r600/sfn/sfn_instr_tex.cpp
@@ -535,7 +535,7 @@ TexInstr::emit_lowered_tex(nir_tex_instr *tex, Inputs& src, Shader& shader)
    int32_t inst_mode = params[2].i32;
    uint32_t dst_swz_packed = params[3].u32;
 
-   auto dst = vf.dest_vec4(tex->dest.ssa, pin_group);
+   auto dst = vf.dest_vec4(tex->def, pin_group);
 
    RegisterVec4::Swizzle src_swizzle = {0};
    for (int i = 0; i < 4; ++i)
@@ -580,7 +580,7 @@ bool
 TexInstr::emit_buf_txf(nir_tex_instr *tex, Inputs& src, Shader& shader)
 {
    auto& vf = shader.value_factory();
-   auto dst = vf.dest_vec4(tex->dest.ssa, pin_group);
+   auto dst = vf.dest_vec4(tex->def, pin_group);
 
    PRegister tex_offset = nullptr;
    if (src.resource_offset)
@@ -633,7 +633,7 @@ TexInstr::emit_buf_txf(nir_tex_instr *tex, Inputs& src, Shader& shader)
 bool
 TexInstr::emit_tex_texture_samples(nir_tex_instr *instr, Inputs& src, Shader& shader)
 {
-   RegisterVec4 dest = shader.value_factory().dest_vec4(instr->dest.ssa, pin_chan);
+   RegisterVec4 dest = shader.value_factory().dest_vec4(instr->def, pin_chan);
    RegisterVec4 help{ 0, true, {4, 4, 4, 4} };
 
@@ -655,7 +655,7 @@ TexInstr::emit_tex_txs(nir_tex_instr *tex,
 {
    auto& vf = shader.value_factory();
 
-   auto dest = vf.dest_vec4(tex->dest.ssa, pin_group);
+   auto dest = vf.dest_vec4(tex->def, pin_group);
 
    if (tex->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
       if (shader.chip_class() >= ISA_CC_EVERGREEN) {
@@ -885,7 +885,7 @@ TexInstr::emit_tex_lod(nir_tex_instr *tex, Inputs& src, Shader& shader)
    auto sampler = get_sampler_id(tex->sampler_index, src.sampler_deref);
    assert(!sampler.indirect && "Indirect sampler selection not yet supported");
 
-   auto dst = shader.value_factory().dest_vec4(tex->dest.ssa, pin_group);
+   auto dst = shader.value_factory().dest_vec4(tex->def, pin_group);
 
    auto swizzle = src.swizzle_from_ncomps(tex->coord_components);
 
@@ -1109,7 +1109,7 @@ LowerTexToBackend::lower_txf_ms(nir_tex_instr *tex)
    }
 
    auto fetch_sample = nir_instr_as_tex(nir_instr_clone(b->shader, &tex->instr));
-   nir_def_init(&fetch_sample->instr, &fetch_sample->dest.ssa, 4, 32);
+   nir_def_init(&fetch_sample->instr, &fetch_sample->def, 4, 32);
 
    int used_coord_mask = 0;
    nir_def *backend1 = prep_src(new_coord, used_coord_mask);
@@ -1120,7 +1120,7 @@ LowerTexToBackend::lower_txf_ms(nir_tex_instr *tex)
    new_coord[3] = nir_iand_imm(b,
                                nir_ushr(b,
-                                        nir_channel(b, &fetch_sample->dest.ssa, 0),
+                                        nir_channel(b, &fetch_sample->def, 0),
                                         nir_ishl_imm(b, new_coord[3], 2)),
                                15);
diff --git a/src/gallium/drivers/r600/sfn/sfn_nir.cpp b/src/gallium/drivers/r600/sfn/sfn_nir.cpp
index a49f853..e087245 100644
--- a/src/gallium/drivers/r600/sfn/sfn_nir.cpp
+++ b/src/gallium/drivers/r600/sfn/sfn_nir.cpp
@@ -98,7 +98,7 @@ r600_nir_lower_scratch_address_impl(nir_builder *b, nir_intrinsic_instr *instr)
       align = instr->src[0].ssa->num_components;
       address_index = 1;
    } else {
-      align = instr->dest.ssa.num_components;
+      align = instr->def.num_components;
    }
 
    nir_def *address = instr->src[address_index].ssa;
@@ -337,7 +337,7 @@ private:
       nir_intrinsic_set_base(intr, new_base);
       nir_instr_rewrite_src(instr, &intr->src[0], nir_src_for_ssa(new_bufid->ssa));
-      return &intr->dest.ssa;
+      return &intr->def;
    }
 };
 
@@ -521,7 +521,7 @@ r600_lower_shared_io_impl(nir_function_impl *impl)
          if (op->intrinsic == nir_intrinsic_load_shared) {
             nir_def *addr = op->src[0].ssa;
 
-            switch (op->dest.ssa.num_components) {
+            switch (op->def.num_components) {
             case 2: {
                auto addr2 = nir_iadd_imm(&b, addr, 4);
                addr = nir_vec2(&b, addr, addr2);
@@ -541,11 +541,11 @@ r600_lower_shared_io_impl(nir_function_impl *impl)
             auto load =
                nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_local_shared_r600);
-            load->num_components = op->dest.ssa.num_components;
+            load->num_components = op->def.num_components;
             load->src[0] = nir_src_for_ssa(addr);
-            nir_def_init(&load->instr, &load->dest.ssa, load->num_components,
+            nir_def_init(&load->instr, &load->def, load->num_components,
                          32);
-            nir_def_rewrite_uses(&op->dest.ssa, &load->dest.ssa);
+            nir_def_rewrite_uses(&op->def, &load->def);
             nir_builder_instr_insert(&b, &load->instr);
          } else {
             nir_def *addr = op->src[1].ssa;
@@ -595,8 +595,8 @@ r600_lower_fs_pos_input_impl(nir_builder *b, nir_instr *instr, void *_options)
    (void)_options;
    auto old_ir = nir_instr_as_intrinsic(instr);
    auto load = nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_input);
-   nir_def_init(&load->instr, &load->dest.ssa,
-                old_ir->dest.ssa.num_components, old_ir->dest.ssa.bit_size);
+   nir_def_init(&load->instr, &load->def,
+                old_ir->def.num_components, old_ir->def.bit_size);
    nir_intrinsic_set_io_semantics(load, nir_intrinsic_io_semantics(old_ir));
    nir_intrinsic_set_base(load, nir_intrinsic_base(old_ir));
@@ -605,7 +605,7 @@ r600_lower_fs_pos_input_impl(nir_builder *b, nir_instr *instr, void *_options)
    load->num_components = old_ir->num_components;
    load->src[0] = old_ir->src[1];
    nir_builder_instr_insert(b, &load->instr);
-   return &load->dest.ssa;
+   return &load->def;
 }
 
 bool
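Note (illustration, not part of the patch): r600_lower_fs_pos_input_impl above shows the hand-built-replacement idiom under the new API: create the intrinsic, initialize its embedded def with nir_def_init, insert it, and hand back &load->def. A trimmed sketch, assuming the old instruction's offset source can simply be reused and ignoring the io-semantics bookkeeping:

    static nir_def *
    clone_as_load_input(nir_builder *b, nir_intrinsic_instr *old_ir)
    {
       nir_intrinsic_instr *load =
          nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_input);
       /* The def lives on the instruction itself now; no nir_dest wrapper. */
       nir_def_init(&load->instr, &load->def,
                    old_ir->def.num_components, old_ir->def.bit_size);
       load->num_components = old_ir->num_components;
       load->src[0] = old_ir->src[1]; /* reuse the offset source */
       nir_builder_instr_insert(b, &load->instr);
       return &load->def;
    }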
diff --git a/src/gallium/drivers/r600/sfn/sfn_nir_legalize_image_load_store.cpp b/src/gallium/drivers/r600/sfn/sfn_nir_legalize_image_load_store.cpp
index 1831d9d..904aaf3 100644
--- a/src/gallium/drivers/r600/sfn/sfn_nir_legalize_image_load_store.cpp
+++ b/src/gallium/drivers/r600/sfn/sfn_nir_legalize_image_load_store.cpp
@@ -46,7 +46,7 @@ r600_legalize_image_load_store_impl(nir_builder *b,
 
    if (load_value)
       default_value =
-         nir_imm_zero(b, ir->dest.ssa.num_components, ir->dest.ssa.bit_size);
+         nir_imm_zero(b, ir->def.num_components, ir->def.bit_size);
 
    auto image_exists =
       nir_ult_imm(b, ir->src[0].ssa, b->shader->info.num_images);
@@ -128,7 +128,7 @@ r600_legalize_image_load_store_impl(nir_builder *b,
       nir_builder_instr_insert(b, new_load);
 
       if (load_value)
-         result = &new_load_ir->dest.ssa;
+         result = &new_load_ir->def;
 
       if (ir->intrinsic != nir_intrinsic_image_size) {
          /* Access is out of range start */
diff --git a/src/gallium/drivers/r600/sfn/sfn_nir_lower_64bit.cpp b/src/gallium/drivers/r600/sfn/sfn_nir_lower_64bit.cpp
index 654cf7c..86a8dad 100644
--- a/src/gallium/drivers/r600/sfn/sfn_nir_lower_64bit.cpp
+++ b/src/gallium/drivers/r600/sfn/sfn_nir_lower_64bit.cpp
@@ -112,17 +112,17 @@ LowerLoad64Uniform::filter(const nir_instr *instr) const
        intr->intrinsic != nir_intrinsic_load_ubo_vec4)
       return false;
 
-   return intr->dest.ssa.bit_size == 64;
+   return intr->def.bit_size == 64;
 }
 
 nir_def *
 LowerLoad64Uniform::lower(nir_instr *instr)
 {
    auto intr = nir_instr_as_intrinsic(instr);
-   int old_components = intr->dest.ssa.num_components;
+   int old_components = intr->def.num_components;
    assert(old_components <= 2);
-   intr->dest.ssa.num_components *= 2;
-   intr->dest.ssa.bit_size = 32;
+   intr->def.num_components *= 2;
+   intr->def.bit_size = 32;
    intr->num_components *= 2;
 
    if (intr->intrinsic == nir_intrinsic_load_ubo ||
@@ -133,8 +133,8 @@ LowerLoad64Uniform::lower(nir_instr *instr)
    for (int i = 0; i < old_components; ++i) {
       result_vec[i] =
         nir_pack_64_2x32_split(b,
-                               nir_channel(b, &intr->dest.ssa, 2 * i),
-                               nir_channel(b, &intr->dest.ssa, 2 * i + 1));
+                               nir_channel(b, &intr->def, 2 * i),
+                               nir_channel(b, &intr->def, 2 * i + 1));
    }
    if (old_components == 1)
       return result_vec[0];
@@ -170,7 +170,7 @@ class LowerSplit64op : public NirLowerInstruction {
       }
       case nir_instr_type_phi: {
          auto phi = nir_instr_as_phi(instr);
-         return phi->dest.ssa.num_components == 64;
+         return phi->def.num_components == 64;
       }
       default:
          return false;
@@ -248,9 +248,9 @@ class LowerSplit64op : public NirLowerInstruction {
         auto phi_lo = nir_phi_instr_create(b->shader);
         auto phi_hi = nir_phi_instr_create(b->shader);
         nir_def_init(
-            &phi_lo->instr, &phi_lo->dest.ssa, phi->dest.ssa.num_components * 2, 32);
+            &phi_lo->instr, &phi_lo->def, phi->def.num_components * 2, 32);
         nir_def_init(
-            &phi_hi->instr, &phi_hi->dest.ssa, phi->dest.ssa.num_components * 2, 32);
+            &phi_hi->instr, &phi_hi->def, phi->def.num_components * 2, 32);
 
         nir_foreach_phi_src(s, phi)
         {
            auto lo = nir_unpack_32_2x16_split_x(b, nir_ssa_for_src(b, s->src, 1));
@@ -258,7 +258,7 @@ class LowerSplit64op : public NirLowerInstruction {
            nir_phi_instr_add_src(phi_lo, s->pred, nir_src_for_ssa(lo));
            nir_phi_instr_add_src(phi_hi, s->pred, nir_src_for_ssa(hi));
         }
-        return nir_pack_64_2x32_split(b, &phi_lo->dest.ssa, &phi_hi->dest.ssa);
+        return nir_pack_64_2x32_split(b, &phi_lo->def, &phi_hi->def);
      }
      default:
         unreachable("Trying to lower instruction that was not in filter");
@@ -285,9 +285,9 @@ LowerSplit64BitVar::filter(const nir_instr *instr) const
       case nir_intrinsic_load_input:
       case nir_intrinsic_load_ubo:
       case nir_intrinsic_load_ssbo:
-         if (intr->dest.ssa.bit_size != 64)
+         if (intr->def.bit_size != 64)
            return false;
-         return intr->dest.ssa.num_components >= 3;
+         return intr->def.num_components >= 3;
       case nir_intrinsic_store_output:
         if (nir_src_bit_size(intr->src[0]) != 64)
            return false;
@@ -400,13 +400,13 @@ LowerSplit64BitVar::split_load_deref_array(nir_intrinsic_instr *intr, nir_src& i
    auto deref1 = nir_build_deref_var(b, vars.first);
    auto deref_array1 = nir_build_deref_array(b, deref1, nir_ssa_for_src(b, index, 1));
    auto load1 =
-      nir_build_load_deref(b, 2, 64, &deref_array1->dest.ssa, (enum gl_access_qualifier)0);
+      nir_build_load_deref(b, 2, 64, &deref_array1->def, (enum gl_access_qualifier)0);
 
    auto deref2 = nir_build_deref_var(b, vars.second);
    auto deref_array2 = nir_build_deref_array(b, deref2, nir_ssa_for_src(b, index, 1));
 
    auto load2 = nir_build_load_deref(
-      b, old_components - 2, 64, &deref_array2->dest.ssa, (enum gl_access_qualifier)0);
+      b, old_components - 2, 64, &deref_array2->def, (enum gl_access_qualifier)0);
 
    return merge_64bit_loads(load1, load2, old_components == 3);
 }
@@ -428,7 +428,7 @@ LowerSplit64BitVar::split_store_deref_array(nir_intrinsic_instr *intr,
    auto deref_array1 =
       nir_build_deref_array(b, deref1, nir_ssa_for_src(b, deref->arr.index, 1));
 
-   nir_build_store_deref(b, &deref_array1->dest.ssa, src_xy, 3);
+   nir_build_store_deref(b, &deref_array1->def, src_xy, 3);
 
    auto deref2 = nir_build_deref_var(b, vars.second);
    auto deref_array2 =
@@ -436,12 +436,12 @@ LowerSplit64BitVar::split_store_deref_array(nir_intrinsic_instr *intr,
 
    if (old_components == 3)
       nir_build_store_deref(b,
-                            &deref_array2->dest.ssa,
+                            &deref_array2->def,
                             nir_channel(b, intr->src[1].ssa, 2),
                             1);
    else
       nir_build_store_deref(b,
-                            &deref_array2->dest.ssa,
+                            &deref_array2->def,
                             nir_channels(b, intr->src[1].ssa, 0xc),
                             3);
 
@@ -462,14 +462,14 @@ LowerSplit64BitVar::split_store_deref_var(nir_intrinsic_instr *intr,
    auto vars = get_var_pair(old_var);
 
    auto deref1 = nir_build_deref_var(b, vars.first);
-   nir_build_store_deref(b, &deref1->dest.ssa, src_xy, 3);
+   nir_build_store_deref(b, &deref1->def, src_xy, 3);
 
    auto deref2 = nir_build_deref_var(b, vars.second);
 
    if (old_components == 3)
-      nir_build_store_deref(b, &deref2->dest.ssa, nir_channel(b, intr->src[1].ssa, 2), 1);
+      nir_build_store_deref(b, &deref2->def, nir_channel(b, intr->src[1].ssa, 2), 1);
    else
       nir_build_store_deref(b,
-                            &deref2->dest.ssa,
+                            &deref2->def,
                             nir_channels(b, intr->src[1].ssa, 0xc),
                             3);
 
@@ -532,21 +532,21 @@ LowerSplit64BitVar::get_var_pair(nir_variable *old_var)
 nir_def *
 LowerSplit64BitVar::split_double_load(nir_intrinsic_instr *load1)
 {
-   unsigned old_components = load1->dest.ssa.num_components;
+   unsigned old_components = load1->def.num_components;
    auto load2 = nir_instr_as_intrinsic(nir_instr_clone(b->shader, &load1->instr));
    nir_io_semantics sem = nir_intrinsic_io_semantics(load1);
 
-   load1->dest.ssa.num_components = 2;
+   load1->def.num_components = 2;
    sem.num_slots = 1;
    nir_intrinsic_set_io_semantics(load1, sem);
 
-   load2->dest.ssa.num_components = old_components - 2;
+   load2->def.num_components = old_components - 2;
    sem.location += 1;
    nir_intrinsic_set_io_semantics(load2, sem);
    nir_intrinsic_set_base(load2, nir_intrinsic_base(load1) + 1);
    nir_builder_instr_insert(b, &load2->instr);
 
-   return merge_64bit_loads(&load1->dest.ssa, &load2->dest.ssa, old_components == 3);
+   return merge_64bit_loads(&load1->def, &load2->def, old_components == 3);
 }
 
 nir_def *
@@ -580,7 +580,7 @@ LowerSplit64BitVar::split_store_output(nir_intrinsic_instr *store1)
 nir_def *
 LowerSplit64BitVar::split_double_load_uniform(nir_intrinsic_instr *intr)
 {
-   unsigned second_components = intr->dest.ssa.num_components - 2;
+   unsigned second_components = intr->def.num_components - 2;
    nir_intrinsic_instr *load2 =
       nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_uniform);
    load2->src[0] = nir_src_for_ssa(nir_iadd_imm(b, intr->src[0].ssa, 1));
@@ -589,48 +589,48 @@ LowerSplit64BitVar::split_double_load_uniform(nir_intrinsic_instr *intr)
    nir_intrinsic_set_range(load2, nir_intrinsic_range(intr));
    load2->num_components = second_components;
 
-   nir_def_init(&load2->instr, &load2->dest.ssa, second_components, 64);
+   nir_def_init(&load2->instr, &load2->def, second_components, 64);
    nir_builder_instr_insert(b, &load2->instr);
 
-   intr->dest.ssa.num_components = intr->num_components = 2;
+   intr->def.num_components = intr->num_components = 2;
 
    if (second_components == 1)
       return nir_vec3(b,
-                      nir_channel(b, &intr->dest.ssa, 0),
-                      nir_channel(b, &intr->dest.ssa, 1),
-                      nir_channel(b, &load2->dest.ssa, 0));
+                      nir_channel(b, &intr->def, 0),
+                      nir_channel(b, &intr->def, 1),
+                      nir_channel(b, &load2->def, 0));
    else
       return nir_vec4(b,
-                      nir_channel(b, &intr->dest.ssa, 0),
-                      nir_channel(b, &intr->dest.ssa, 1),
-                      nir_channel(b, &load2->dest.ssa, 0),
-                      nir_channel(b, &load2->dest.ssa, 1));
+                      nir_channel(b, &intr->def, 0),
+                      nir_channel(b, &intr->def, 1),
+                      nir_channel(b, &load2->def, 0),
+                      nir_channel(b, &load2->def, 1));
 }
 
 nir_def *
 LowerSplit64BitVar::split_double_load_ssbo(nir_intrinsic_instr *intr)
 {
-   unsigned second_components = intr->dest.ssa.num_components - 2;
+   unsigned second_components = intr->def.num_components - 2;
    nir_intrinsic_instr *load2 =
       nir_instr_as_intrinsic(nir_instr_clone(b->shader, &intr->instr));
 
    auto new_src0 = nir_src_for_ssa(nir_iadd_imm(b, intr->src[0].ssa, 1));
    nir_instr_rewrite_src(&load2->instr, &load2->src[0], new_src0);
    load2->num_components = second_components;
-   nir_def_init(&load2->instr, &load2->dest.ssa, second_components, 64);
+   nir_def_init(&load2->instr, &load2->def, second_components, 64);
 
    nir_intrinsic_set_dest_type(load2, nir_intrinsic_dest_type(intr));
    nir_builder_instr_insert(b, &load2->instr);
 
-   intr->dest.ssa.num_components = intr->num_components = 2;
+   intr->def.num_components = intr->num_components = 2;
 
-   return merge_64bit_loads(&intr->dest.ssa, &load2->dest.ssa, second_components == 1);
+   return merge_64bit_loads(&intr->def, &load2->def, second_components == 1);
 }
 
 nir_def *
 LowerSplit64BitVar::split_double_load_ubo(nir_intrinsic_instr *intr)
 {
-   unsigned second_components = intr->dest.ssa.num_components - 2;
+   unsigned second_components = intr->def.num_components - 2;
    nir_intrinsic_instr *load2 =
       nir_instr_as_intrinsic(nir_instr_clone(b->shader, &intr->instr));
    load2->src[0] = intr->src[0];
@@ -643,12 +643,12 @@ LowerSplit64BitVar::split_double_load_ubo(nir_intrinsic_instr *intr)
 
    load2->num_components = second_components;
 
-   nir_def_init(&load2->instr, &load2->dest.ssa, second_components, 64);
+   nir_def_init(&load2->instr, &load2->def, second_components, 64);
    nir_builder_instr_insert(b, &load2->instr);
 
-   intr->dest.ssa.num_components = intr->num_components = 2;
+   intr->def.num_components = intr->num_components = 2;
 
-   return merge_64bit_loads(&intr->dest.ssa, &load2->dest.ssa, second_components == 1);
+   return merge_64bit_loads(&intr->def, &load2->def, second_components == 1);
 }
 
 nir_def *
@@ -833,7 +833,7 @@ Lower64BitToVec2::filter(const nir_instr *instr) const
       case nir_intrinsic_load_global:
       case nir_intrinsic_load_ubo_vec4:
       case nir_intrinsic_load_ssbo:
-         return intr->dest.ssa.bit_size == 64;
+         return intr->def.bit_size == 64;
       case nir_intrinsic_store_deref: {
         if (nir_src_bit_size(intr->src[1]) == 64)
            return true;
@@ -854,7 +854,7 @@ Lower64BitToVec2::filter(const nir_instr *instr) const
    }
    case nir_instr_type_phi: {
       auto phi = nir_instr_as_phi(instr);
-      return phi->dest.ssa.bit_size == 64;
+      return phi->def.bit_size == 64;
    }
    case nir_instr_type_load_const: {
       auto lc = nir_instr_as_load_const(instr);
@@ -918,8 +918,8 @@ Lower64BitToVec2::lower(nir_instr *instr)
    }
    case nir_instr_type_phi: {
       auto phi = nir_instr_as_phi(instr);
-      phi->dest.ssa.bit_size = 32;
-      phi->dest.ssa.num_components = 2;
+      phi->def.bit_size = 32;
+      phi->def.num_components = 2;
       return NIR_LOWER_INSTR_PROGRESS;
    }
    case nir_instr_type_load_const: {
@@ -973,8 +973,8 @@ Lower64BitToVec2::load_deref_64_to_vec2(nir_intrinsic_instr *intr)
    }
 
    intr->num_components = components;
-   intr->dest.ssa.bit_size = 32;
-   intr->dest.ssa.num_components = components;
+   intr->def.bit_size = 32;
+   intr->def.num_components = components;
    return NIR_LOWER_INSTR_PROGRESS;
 }
 
@@ -1013,8 +1013,8 @@ nir_def *
 Lower64BitToVec2::load_uniform_64_to_vec2(nir_intrinsic_instr *intr)
 {
    intr->num_components *= 2;
-   intr->dest.ssa.bit_size = 32;
-   intr->dest.ssa.num_components *= 2;
+   intr->def.bit_size = 32;
+   intr->def.num_components *= 2;
    nir_intrinsic_set_dest_type(intr, nir_type_float32);
    return NIR_LOWER_INSTR_PROGRESS;
 }
 
@@ -1023,8 +1023,8 @@ nir_def *
 Lower64BitToVec2::load_64_to_vec2(nir_intrinsic_instr *intr)
 {
    intr->num_components *= 2;
-   intr->dest.ssa.bit_size = 32;
-   intr->dest.ssa.num_components *= 2;
+   intr->def.bit_size = 32;
+   intr->def.num_components *= 2;
    nir_intrinsic_set_component(intr, nir_intrinsic_component(intr) * 2);
    return NIR_LOWER_INSTR_PROGRESS;
 }
 
@@ -1033,8 +1033,8 @@ nir_def *
 Lower64BitToVec2::load_ssbo_64_to_vec2(nir_intrinsic_instr *intr)
 {
    intr->num_components *= 2;
-   intr->dest.ssa.bit_size = 32;
-   intr->dest.ssa.num_components *= 2;
+   intr->def.bit_size = 32;
+   intr->def.num_components *= 2;
    return NIR_LOWER_INSTR_PROGRESS;
 }
 
@@ -1286,7 +1286,7 @@ r600_lower_64bit_intrinsic(nir_builder *b, nir_intrinsic_instr *instr)
 
    bool has_dest = nir_intrinsic_infos[instr->intrinsic].has_dest;
    if (has_dest) {
-      if (instr->dest.ssa.bit_size != 64)
+      if (instr->def.bit_size != 64)
         return false;
    } else {
       if (nir_src_bit_size(instr->src[0]) != 64)
@@ -1320,8 +1320,8 @@ r600_lower_64bit_intrinsic(nir_builder *b, nir_intrinsic_instr *instr)
    first->num_components = 2;
    second->num_components -= 2;
    if (has_dest) {
-      first->dest.ssa.num_components = 2;
-      second->dest.ssa.num_components -= 2;
+      first->def.num_components = 2;
+      second->def.num_components -= 2;
    }
 
    nir_builder_instr_insert(b, &first->instr);
@@ -1330,13 +1330,13 @@ r600_lower_64bit_intrinsic(nir_builder *b, nir_intrinsic_instr *instr)
    if (has_dest) {
       /* Merge the two loads' results back into a vector. */
       nir_scalar channels[4] = {
-         nir_get_ssa_scalar(&first->dest.ssa, 0),
-         nir_get_ssa_scalar(&first->dest.ssa, 1),
-         nir_get_ssa_scalar(&second->dest.ssa, 0),
-         nir_get_ssa_scalar(&second->dest.ssa, second->num_components > 1 ? 1 : 0),
+         nir_get_ssa_scalar(&first->def, 0),
+         nir_get_ssa_scalar(&first->def, 1),
+         nir_get_ssa_scalar(&second->def, 0),
+         nir_get_ssa_scalar(&second->def, second->num_components > 1 ? 1 : 0),
      };
      nir_def *new_ir = nir_vec_scalars(b, channels, instr->num_components);
-      nir_def_rewrite_uses(&instr->dest.ssa, new_ir);
+      nir_def_rewrite_uses(&instr->def, new_ir);
    } else {
       /* Split the src value across the two stores. */
       b->cursor = nir_before_instr(&instr->instr);
diff --git a/src/gallium/drivers/r600/sfn/sfn_nir_lower_fs_out_to_vector.cpp b/src/gallium/drivers/r600/sfn/sfn_nir_lower_fs_out_to_vector.cpp
index 22c03ba..c6602b2 100644
--- a/src/gallium/drivers/r600/sfn/sfn_nir_lower_fs_out_to_vector.cpp
+++ b/src/gallium/drivers/r600/sfn/sfn_nir_lower_fs_out_to_vector.cpp
@@ -428,7 +428,7 @@ NirLowerFSOutToVector::create_new_io(nir_builder *b,
    nir_deref_instr *deref = nir_build_deref_var(b, var);
    deref = clone_deref_array(b, deref, nir_src_as_deref(intr->src[0]));
 
-   new_intr->src[0] = nir_src_for_ssa(&deref->dest.ssa);
+   new_intr->src[0] = nir_src_for_ssa(&deref->def);
    new_intr->src[1] =
       nir_src_for_ssa(create_combined_vector(b, srcs, first_comp, num_comps));
diff --git a/src/gallium/drivers/r600/sfn/sfn_nir_lower_tess_io.cpp b/src/gallium/drivers/r600/sfn/sfn_nir_lower_tess_io.cpp
index b46c89e..86715dc 100644
--- a/src/gallium/drivers/r600/sfn/sfn_nir_lower_tess_io.cpp
+++ b/src/gallium/drivers/r600/sfn/sfn_nir_lower_tess_io.cpp
@@ -55,9 +55,9 @@ static nir_def *
 emit_load_param_base(nir_builder *b, nir_intrinsic_op op)
 {
    nir_intrinsic_instr *result = nir_intrinsic_instr_create(b->shader, op);
-   nir_def_init(&result->instr, &result->dest.ssa, 4, 32);
+   nir_def_init(&result->instr, &result->def, 4, 32);
    nir_builder_instr_insert(b, &result->instr);
-   return &result->dest.ssa;
+   return &result->def;
 }
 
 static int
@@ -213,9 +213,9 @@ static uint32_t
 get_dest_usee_mask(nir_intrinsic_instr *op)
 {
    MaskQuery mq = {0};
-   mq.full_mask = (1 << op->dest.ssa.num_components) - 1;
+   mq.full_mask = (1 << op->def.num_components) - 1;
 
-   nir_foreach_use(use_src, &op->dest.ssa)
+   nir_foreach_use(use_src, &op->def)
    {
       auto use_instr = use_src->parent_instr;
       mq.ssa_index = use_src->ssa->index;
@@ -264,7 +264,7 @@ replace_load_instr(nir_builder *b, nir_intrinsic_instr *op, nir_def *addr)
       auto new_load = nir_load_local_shared_r600(b, 32, addr_outer);
       auto undef = nir_undef(b, 1, 32);
 
-      int comps = op->dest.ssa.num_components;
+      int comps = op->def.num_components;
       nir_def *remix[4] = {undef, undef, undef, undef};
 
       int chan = 0;
@@ -274,7 +274,7 @@ replace_load_instr(nir_builder *b, nir_intrinsic_instr *op, nir_def *addr)
         }
      }
      auto new_load_remixed = nir_vec(b, remix, comps);
-      nir_def_rewrite_uses(&op->dest.ssa, new_load_remixed);
+      nir_def_rewrite_uses(&op->def, new_load_remixed);
    }
    nir_instr_remove(&op->instr);
 }
@@ -284,9 +284,9 @@ r600_load_rel_patch_id(nir_builder *b)
 {
    auto patch_id =
       nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_tcs_rel_patch_id_r600);
-   nir_def_init(&patch_id->instr, &patch_id->dest.ssa, 1, 32);
+   nir_def_init(&patch_id->instr, &patch_id->def, 1, 32);
    nir_builder_instr_insert(b, &patch_id->instr);
-   return &patch_id->dest.ssa;
+   return &patch_id->def;
 }
 
 static void
@@ -381,7 +381,7 @@ r600_lower_tess_io_impl(nir_builder *b, nir_instr *instr, enum mesa_prim prim_ty
         auto base = emit_load_param_base(b, nir_intrinsic_load_tcs_in_param_base_r600);
         vertices_in = nir_channel(b, base, 2);
      }
-      nir_def_rewrite_uses(&op->dest.ssa, vertices_in);
+      nir_def_rewrite_uses(&op->def, vertices_in);
       nir_instr_remove(&op->instr);
       return true;
    }
@@ -452,17 +452,17 @@ r600_lower_tess_io_impl(nir_builder *b, nir_instr *instr, enum mesa_prim prim_ty
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_local_shared_r600);
      tf->num_components = ncomps;
      tf->src[0] = nir_src_for_ssa(addr_outer);
-      nir_def_init(&tf->instr, &tf->dest.ssa, tf->num_components, 32);
+      nir_def_init(&tf->instr, &tf->def, tf->num_components, 32);
      nir_builder_instr_insert(b, &tf->instr);
      if (ncomps < 4 && b->shader->info.stage != MESA_SHADER_TESS_EVAL) {
         auto undef = nir_undef(b, 1, 32);
         nir_def *srcs[4] = {undef, undef, undef, undef};
         for (unsigned i = 0; i < ncomps; ++i)
-            srcs[i] = nir_channel(b, &tf->dest.ssa, i);
+            srcs[i] = nir_channel(b, &tf->def, i);
         auto help = nir_vec(b, srcs, 4);
-         nir_def_rewrite_uses(&op->dest.ssa, help);
+         nir_def_rewrite_uses(&op->def, help);
      } else {
-         nir_def_rewrite_uses(&op->dest.ssa, &tf->dest.ssa);
+         nir_def_rewrite_uses(&op->def, &tf->def);
      }
      nir_instr_remove(instr);
      return true;
@@ -545,10 +545,10 @@ r600_append_tcs_TF_emission(nir_shader *shader, enum mesa_prim prim_type)
    auto invocation_id =
       nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_invocation_id);
-   nir_def_init(&invocation_id->instr, &invocation_id->dest.ssa, 1, 32);
+   nir_def_init(&invocation_id->instr, &invocation_id->def, 1, 32);
    nir_builder_instr_insert(b, &invocation_id->instr);
 
-   nir_push_if(b, nir_ieq_imm(b, &invocation_id->dest.ssa, 0));
+   nir_push_if(b, nir_ieq_imm(b, &invocation_id->def, 0));
 
    auto base = emit_load_param_base(b, nir_intrinsic_load_tcs_out_param_base_r600);
    auto rel_patch_id = r600_load_rel_patch_id(b);
@@ -560,21 +560,21 @@ r600_append_tcs_TF_emission(nir_shader *shader, enum mesa_prim prim_type)
    tf_outer->num_components = outer_comps;
    tf_outer->src[0] = nir_src_for_ssa(addr_outer);
    nir_def_init(
-      &tf_outer->instr, &tf_outer->dest.ssa, tf_outer->num_components, 32);
+      &tf_outer->instr, &tf_outer->def, tf_outer->num_components, 32);
    nir_builder_instr_insert(b, &tf_outer->instr);
 
    std::vector<nir_def *> tf_out;
 
    auto tf_out_base =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_tcs_tess_factor_base_r600);
-   nir_def_init(&tf_out_base->instr, &tf_out_base->dest.ssa, 1, 32);
+   nir_def_init(&tf_out_base->instr, &tf_out_base->def, 1, 32);
    nir_builder_instr_insert(b, &tf_out_base->instr);
 
    auto out_addr0 = nir_build_alu(b,
                                   nir_op_umad24,
                                   rel_patch_id,
                                   nir_imm_int(b, stride),
-                                  &tf_out_base->dest.ssa,
+                                  &tf_out_base->def,
                                   NULL);
    int chanx = 0;
    int chany = 1;
@@ -586,22 +586,22 @@ r600_append_tcs_TF_emission(nir_shader *shader, enum mesa_prim prim_type)
 
    tf_out.push_back(nir_vec2(b,
                              out_addr0,
-                             nir_channel(b, &tf_outer->dest.ssa, chanx)));
+                             nir_channel(b, &tf_outer->def, chanx)));
    tf_out.push_back(nir_vec2(b,
                              nir_iadd_imm(b, out_addr0, 4),
-                             nir_channel(b, &tf_outer->dest.ssa, chany)));
+                             nir_channel(b, &tf_outer->def, chany)));
 
    if (outer_comps > 2) {
       tf_out.push_back(nir_vec2(b,
                                 nir_iadd_imm(b, out_addr0, 8),
-                                nir_channel(b, &tf_outer->dest.ssa, 2)));
+                                nir_channel(b, &tf_outer->def, 2)));
    }
 
    if (outer_comps > 3) {
       tf_out.push_back(nir_vec2(b,
                                 nir_iadd_imm(b, out_addr0, 12),
-                                nir_channel(b, &tf_outer->dest.ssa, 3)));
+                                nir_channel(b, &tf_outer->def, 3)));
       inner_base = 16;
    }
 
@@ -613,18 +613,18 @@ r600_append_tcs_TF_emission(nir_shader *shader, enum mesa_prim prim_type)
      tf_inner->num_components = inner_comps;
      tf_inner->src[0] = nir_src_for_ssa(addr1);
      nir_def_init(
-         &tf_inner->instr, &tf_inner->dest.ssa, tf_inner->num_components, 32);
+         &tf_inner->instr, &tf_inner->def, tf_inner->num_components, 32);
      nir_builder_instr_insert(b, &tf_inner->instr);
 
      tf_out.push_back(nir_vec2(b,
                                nir_iadd_imm(b, out_addr0, inner_base),
-                                nir_channel(b, &tf_inner->dest.ssa, 0)));
+                                nir_channel(b, &tf_inner->def, 0)));
      if (inner_comps > 1) {
         tf_out.push_back(nir_vec2(b,
                                   nir_iadd_imm(b, out_addr0, inner_base + 4),
-                                   nir_channel(b, &tf_inner->dest.ssa, 1)));
+                                   nir_channel(b, &tf_inner->def, 1)));
      }
    }
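Note (illustration, not part of the patch): nearly every hunk in this pass ends with the same replace-and-remove idiom, which after the rename reads:

    /* Point all users of the old def at the replacement, then drop it. */
    static void
    replace_and_remove(nir_intrinsic_instr *op, nir_def *replacement)
    {
       nir_def_rewrite_uses(&op->def, replacement);
       nir_instr_remove(&op->instr);
    }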
diff --git a/src/gallium/drivers/r600/sfn/sfn_nir_vectorize_vs_inputs.c b/src/gallium/drivers/r600/sfn/sfn_nir_vectorize_vs_inputs.c
index 9df39de..c08cb9e 100644
--- a/src/gallium/drivers/r600/sfn/sfn_nir_vectorize_vs_inputs.c
+++ b/src/gallium/drivers/r600/sfn/sfn_nir_vectorize_vs_inputs.c
@@ -147,14 +147,14 @@ r600_create_new_load(nir_builder *b,
    b->cursor = nir_before_instr(&intr->instr);
 
    nir_intrinsic_instr *new_intr = nir_intrinsic_instr_create(b->shader, intr->intrinsic);
-   nir_def_init(&new_intr->instr, &new_intr->dest.ssa, num_comps,
-                intr->dest.ssa.bit_size);
+   nir_def_init(&new_intr->instr, &new_intr->def, num_comps,
+                intr->def.bit_size);
    new_intr->num_components = num_comps;
 
    nir_deref_instr *deref = nir_build_deref_var(b, var);
    deref = r600_clone_deref_array(b, deref, nir_src_as_deref(intr->src[0]));
 
-   new_intr->src[0] = nir_src_for_ssa(&deref->dest.ssa);
+   new_intr->src[0] = nir_src_for_ssa(&deref->def);
 
    if (intr->intrinsic == nir_intrinsic_interp_deref_at_offset ||
        intr->intrinsic == nir_intrinsic_interp_deref_at_sample)
@@ -164,8 +164,8 @@ r600_create_new_load(nir_builder *b,
    for (unsigned i = 0; i < old_num_comps; ++i)
       channels[i] = comp - var->data.location_frac + i;
 
-   nir_def *load = nir_swizzle(b, &new_intr->dest.ssa, channels, old_num_comps);
-   nir_def_rewrite_uses(&intr->dest.ssa, load);
+   nir_def *load = nir_swizzle(b, &new_intr->def, channels, old_num_comps);
+   nir_def_rewrite_uses(&intr->def, load);
 
    /* Remove the old load intrinsic */
    nir_instr_remove(&intr->instr);
diff --git a/src/gallium/drivers/r600/sfn/sfn_shader.cpp b/src/gallium/drivers/r600/sfn/sfn_shader.cpp
index cad537b..cef1b3d 100644
--- a/src/gallium/drivers/r600/sfn/sfn_shader.cpp
+++ b/src/gallium/drivers/r600/sfn/sfn_shader.cpp
@@ -1053,12 +1053,12 @@ RegisterAccessHandler::RegisterAccessHandler(Shader& shader, nir_intrinsic_instr
 
 void RegisterReadHandler::visit(LocalArray& array)
 {
-   int slots = ir->dest.ssa.bit_size / 32;
-   auto pin = ir->dest.ssa.num_components > 1 ? pin_none : pin_free;
-   for (int i = 0; i < ir->dest.ssa.num_components; ++i) {
+   int slots = ir->def.bit_size / 32;
+   auto pin = ir->def.num_components > 1 ? pin_none : pin_free;
+   for (int i = 0; i < ir->def.num_components; ++i) {
       for (int s = 0; s < slots; ++s) {
          int chan = i * slots + s;
-         auto dest = sh.value_factory().dest(ir->dest.ssa, chan, pin);
+         auto dest = sh.value_factory().dest(ir->def, chan, pin);
          auto src = array.element(nir_intrinsic_base(ir), addr, chan);
          sh.emit_instruction(new AluInstr(op1_mov, dest, src, AluInstr::write));
       }
@@ -1067,7 +1067,7 @@ void RegisterReadHandler::visit(LocalArray& array)
 
 void RegisterReadHandler::visit(Register& reg)
 {
-   auto dest = sh.value_factory().dest(ir->dest.ssa, 0, pin_free);
+   auto dest = sh.value_factory().dest(ir->def, 0, pin_free);
    sh.emit_instruction(new AluInstr(op1_mov, dest, &reg, AluInstr::write));
 }
 
@@ -1100,11 +1100,11 @@ void RegisterWriteHandler::visit(Register& dest)
 bool
 Shader::emit_atomic_local_shared(nir_intrinsic_instr *instr)
 {
-   bool uses_retval = !list_is_empty(&instr->dest.ssa.uses);
+   bool uses_retval = !list_is_empty(&instr->def.uses);
 
    auto& vf = value_factory();
 
-   auto dest_value = uses_retval ? vf.dest(instr->dest.ssa, 0, pin_free) : nullptr;
+   auto dest_value = uses_retval ? vf.dest(instr->def, 0, pin_free) : nullptr;
 
    auto op = lds_op_from_intrinsic(nir_intrinsic_atomic_op(instr), uses_retval);
 
@@ -1113,7 +1113,7 @@ Shader::emit_atomic_local_shared(nir_intrinsic_instr *instr)
     * value from read queue. */
    if (!uses_retval && (op == LDS_XCHG_RET || op == LDS_CMP_XCHG_RET)) {
-      dest_value = vf.dest(instr->dest.ssa, 0, pin_free);
+      dest_value = vf.dest(instr->def, 0, pin_free);
    }
 
    auto address = vf.src(instr->src[0], 0);
@@ -1217,7 +1217,7 @@ bool
 Shader::emit_load_scratch(nir_intrinsic_instr *intr)
 {
    auto addr = value_factory().src(intr->src[0], 0);
-   auto dest = value_factory().dest_vec4(intr->dest.ssa, pin_group);
+   auto dest = value_factory().dest_vec4(intr->def, pin_group);
 
    if (chip_class() >= ISA_CC_R700) {
       RegisterVec4::Swizzle dest_swz = {7, 7, 7, 7};
@@ -1265,7 +1265,7 @@ Shader::emit_load_scratch(nir_intrinsic_instr *intr)
 bool
 Shader::emit_load_global(nir_intrinsic_instr *intr)
 {
-   auto dest = value_factory().dest_vec4(intr->dest.ssa, pin_group);
+   auto dest = value_factory().dest_vec4(intr->def, pin_group);
 
    auto src_value = value_factory().src(intr->src[0], 0);
    auto src = src_value->as_register();
@@ -1312,7 +1312,7 @@ bool
 Shader::emit_local_load(nir_intrinsic_instr *instr)
 {
    auto address = value_factory().src_vec(instr->src[0], instr->num_components);
-   auto dest_value = value_factory().dest_vec(instr->dest.ssa, instr->num_components);
+   auto dest_value = value_factory().dest_vec(instr->def, instr->num_components);
    emit_instruction(new LDSReadInstr(dest_value, address));
    return true;
 }
@@ -1450,7 +1450,7 @@ Shader::emit_load_tcs_param_base(nir_intrinsic_instr *instr, int offset)
    emit_instruction(
       new AluInstr(op1_mov, src, value_factory().zero(), AluInstr::last_write));
 
-   auto dest = value_factory().dest_vec4(instr->dest.ssa, pin_group);
+   auto dest = value_factory().dest_vec4(instr->def, pin_group);
    auto fetch = new LoadFromBuffer(dest,
                                    {0, 1, 2, 3},
                                    src,
@@ -1471,11 +1471,11 @@ Shader::emit_shader_clock(nir_intrinsic_instr *instr)
    auto& vf = value_factory();
    auto group = new AluGroup();
    group->add_instruction(new AluInstr(op1_mov,
-                                       vf.dest(instr->dest.ssa, 0, pin_chan),
+                                       vf.dest(instr->def, 0, pin_chan),
                                        vf.inline_const(ALU_SRC_TIME_LO, 0),
                                        AluInstr::write));
    group->add_instruction(new AluInstr(op1_mov,
-                                       vf.dest(instr->dest.ssa, 1, pin_chan),
+                                       vf.dest(instr->def, 1, pin_chan),
                                        vf.inline_const(ALU_SRC_TIME_HI, 0),
                                        AluInstr::last_write));
    emit_instruction(group);
@@ -1536,9 +1536,9 @@ Shader::load_ubo(nir_intrinsic_instr *instr)
    auto addr = value_factory().src(instr->src[1], 0)->as_register();
    RegisterVec4::Swizzle dest_swz{7, 7, 7, 7};
-   auto dest = value_factory().dest_vec4(instr->dest.ssa, pin_group);
+   auto dest = value_factory().dest_vec4(instr->def, pin_group);
 
-   for (unsigned i = 0; i < instr->dest.ssa.num_components; ++i) {
+   for (unsigned i = 0; i < instr->def.num_components; ++i) {
       dest_swz[i] = i + nir_intrinsic_component(instr);
    }
 
@@ -1560,18 +1560,18 @@ Shader::load_ubo(nir_intrinsic_instr *instr)
       int buf_cmp = nir_intrinsic_component(instr);
 
       AluInstr *ir = nullptr;
-      auto pin = instr->dest.ssa.num_components == 1 ? pin_free : pin_none;
-      for (unsigned i = 0; i < instr->dest.ssa.num_components; ++i) {
+      auto pin = instr->def.num_components == 1 ? pin_free : pin_none;
+      for (unsigned i = 0; i < instr->def.num_components; ++i) {
 
-         sfn_log << SfnLog::io << "UBO[" << bufid << "] " << instr->dest.ssa.index
+         sfn_log << SfnLog::io << "UBO[" << bufid << "] " << instr->def.index
                 << " const[" << i << "]: " << instr->const_index[i] << "\n";
 
         auto uniform =
            value_factory().uniform(512 + buf_offset->u32, i + buf_cmp, bufid->u32);
         ir = new AluInstr(op1_mov,
-                           value_factory().dest(instr->dest.ssa, i, pin),
+                           value_factory().dest(instr->def, i, pin),
                           uniform,
                           {alu_write});
         emit_instruction(ir);
@@ -1584,11 +1584,11 @@ Shader::load_ubo(nir_intrinsic_instr *instr)
       AluInstr *ir = nullptr;
       auto kc_id = value_factory().src(instr->src[0], 0);
-      for (unsigned i = 0; i < instr->dest.ssa.num_components; ++i) {
+      for (unsigned i = 0; i < instr->def.num_components; ++i) {
         int cmp = buf_cmp + i;
         auto u =
            new UniformValue(512 + buf_offset->u32, cmp, kc_id, nir_intrinsic_base(instr));
-         auto dest = value_factory().dest(instr->dest.ssa, i, pin_none);
+         auto dest = value_factory().dest(instr->def, i, pin_none);
         ir = new AluInstr(op1_mov, dest, u, AluInstr::write);
         emit_instruction(ir);
      }
diff --git a/src/gallium/drivers/r600/sfn/sfn_shader_cs.cpp b/src/gallium/drivers/r600/sfn/sfn_shader_cs.cpp
index 24a16e3..4cd2d4c 100644
--- a/src/gallium/drivers/r600/sfn/sfn_shader_cs.cpp
+++ b/src/gallium/drivers/r600/sfn/sfn_shader_cs.cpp
@@ -102,7 +102,7 @@ ComputeShader::emit_load_from_info_buffer(nir_intrinsic_instr *instr, int offset
                                    AluInstr::last_write));
    }
 
-   auto dest = value_factory().dest_vec4(instr->dest.ssa, pin_group);
+   auto dest = value_factory().dest_vec4(instr->def, pin_group);
 
    auto ir = new LoadFromBuffer(dest,
                                 {0, 1, 2, 7},
@@ -126,7 +126,7 @@ ComputeShader::emit_load_3vec(nir_intrinsic_instr *instr,
    auto& vf = value_factory();
 
    for (int i = 0; i < 3; ++i) {
-      auto dest = vf.dest(instr->dest.ssa, i, pin_none);
+      auto dest = vf.dest(instr->def, i, pin_none);
       emit_instruction(new AluInstr(
          op1_mov, dest, src[i], i == 2 ? AluInstr::last_write : AluInstr::write));
    }
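Note (illustration, not part of the patch): load_ubo above sizes its destination swizzle from the def itself; num_components and bit_size moved with the def in this rename. A tiny sketch of deriving a per-channel write mask the same way, where first_comp mirrors nir_intrinsic_component:

    static unsigned
    dest_write_mask(const nir_intrinsic_instr *instr, unsigned first_comp)
    {
       unsigned mask = 0;
       for (unsigned i = 0; i < instr->def.num_components; ++i)
          mask |= 1u << (first_comp + i);
       return mask;
    }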
diff --git a/src/gallium/drivers/r600/sfn/sfn_shader_fs.cpp b/src/gallium/drivers/r600/sfn/sfn_shader_fs.cpp
index 86f66c7..d37b65c 100644
--- a/src/gallium/drivers/r600/sfn/sfn_shader_fs.cpp
+++ b/src/gallium/drivers/r600/sfn/sfn_shader_fs.cpp
@@ -78,9 +78,9 @@ FragmentShader::load_input(nir_intrinsic_instr *intr)
    auto location = nir_intrinsic_io_semantics(intr).location;
    if (location == VARYING_SLOT_POS) {
       AluInstr *ir = nullptr;
-      for (unsigned i = 0; i < intr->dest.ssa.num_components; ++i) {
+      for (unsigned i = 0; i < intr->def.num_components; ++i) {
         ir = new AluInstr(op1_mov,
-                           vf.dest(intr->dest.ssa, i, pin_none),
+                           vf.dest(intr->def, i, pin_none),
                           m_pos_input[i],
                           AluInstr::write);
         emit_instruction(ir);
@@ -91,7 +91,7 @@ FragmentShader::load_input(nir_intrinsic_instr *intr)
 
    if (location == VARYING_SLOT_FACE) {
       auto ir = new AluInstr(op2_setgt_dx10,
-                             vf.dest(intr->dest.ssa, 0, pin_none),
+                             vf.dest(intr->def, 0, pin_none),
                              m_face_input,
                              vf.inline_const(ALU_SRC_0, 0),
                              AluInstr::last_write);
@@ -181,9 +181,9 @@ FragmentShader::process_stage_intrinsic(nir_intrinsic_instr *intr)
       if (m_apply_sample_mask) {
          return emit_load_sample_mask_in(intr);
       } else
-         return emit_simple_mov(intr->dest.ssa, 0, m_sample_mask_reg);
+         return emit_simple_mov(intr->def, 0, m_sample_mask_reg);
    case nir_intrinsic_load_sample_id:
-      return emit_simple_mov(intr->dest.ssa, 0, m_sample_id_reg);
+      return emit_simple_mov(intr->def, 0, m_sample_id_reg);
    case nir_intrinsic_load_helper_invocation:
       return emit_load_helper_invocation(intr);
    case nir_intrinsic_load_sample_pos:
@@ -200,8 +200,8 @@ FragmentShader::load_interpolated_input(nir_intrinsic_instr *intr)
    unsigned loc = nir_intrinsic_io_semantics(intr).location;
    switch (loc) {
    case VARYING_SLOT_POS:
-      for (unsigned i = 0; i < intr->dest.ssa.num_components; ++i)
-         vf.inject_value(intr->dest.ssa, i, m_pos_input[i]);
+      for (unsigned i = 0; i < intr->def.num_components; ++i)
+         vf.inject_value(intr->def, i, m_pos_input[i]);
       return true;
    case VARYING_SLOT_FACE:
       return false;
@@ -301,7 +301,7 @@ bool
 FragmentShader::emit_load_sample_mask_in(nir_intrinsic_instr *instr)
 {
    auto& vf = value_factory();
-   auto dest = vf.dest(instr->dest.ssa, 0, pin_free);
+   auto dest = vf.dest(instr->def, 0, pin_free);
    auto tmp = vf.temp_register();
    assert(m_sample_id_reg);
    assert(m_sample_mask_reg);
@@ -332,7 +332,7 @@ FragmentShader::emit_load_helper_invocation(nir_intrinsic_instr *instr)
    vtx->set_fetch_flag(FetchInstr::vpm);
    vtx->set_fetch_flag(FetchInstr::use_tc);
    vtx->set_always_keep();
-   auto dst = value_factory().dest(instr->dest.ssa, 0, pin_free);
+   auto dst = value_factory().dest(instr->def, 0, pin_free);
    auto ir = new AluInstr(op1_mov, dst, m_helper_invocation, AluInstr::last_write);
    ir->add_required_instr(vtx);
    emit_instruction(vtx);
@@ -570,7 +570,7 @@ FragmentShader::emit_export_pixel(nir_intrinsic_instr& intr)
 bool
 FragmentShader::emit_load_sample_pos(nir_intrinsic_instr *instr)
 {
-   auto dest = value_factory().dest_vec4(instr->dest.ssa, pin_group);
+   auto dest = value_factory().dest_vec4(instr->def, pin_group);
 
    auto fetch = new LoadFromBuffer(dest,
                                    {0, 1, 2, 3},
@@ -684,12 +684,12 @@ FragmentShaderR600::load_input_hw(nir_intrinsic_instr *intr)
 {
    auto& vf = value_factory();
    AluInstr *ir = nullptr;
-   for (unsigned i = 0; i < intr->dest.ssa.num_components; ++i) {
+   for (unsigned i = 0; i < intr->def.num_components; ++i) {
       sfn_log << SfnLog::io << "Inject register "
              << *m_interpolated_inputs[nir_intrinsic_base(intr)][i] << "\n";
      unsigned index = nir_intrinsic_component(intr) + i;
      assert(index < 4);
-      vf.inject_value(intr->dest.ssa,
+      vf.inject_value(intr->def,
                      i,
                      m_interpolated_inputs[nir_intrinsic_base(intr)][index]);
    }
@@ -726,7 +726,7 @@ FragmentShaderEG::load_input_hw(nir_intrinsic_instr *intr)
    bool need_temp = comp > 0;
 
    AluInstr *ir = nullptr;
-   for (unsigned i = 0; i < intr->dest.ssa.num_components; ++i) {
+   for (unsigned i = 0; i < intr->def.num_components; ++i) {
       if (need_temp) {
          auto tmp = vf.temp_register(comp + i);
          ir =
@@ -736,11 +736,11 @@ FragmentShaderEG::load_input_hw(nir_intrinsic_instr *intr)
                          AluInstr::last_write);
         emit_instruction(ir);
         emit_instruction(new AluInstr(
-            op1_mov, vf.dest(intr->dest.ssa, i, pin_chan), tmp, AluInstr::last_write));
+            op1_mov, vf.dest(intr->def, i, pin_chan), tmp, AluInstr::last_write));
      } else {
 
         ir = new AluInstr(op1_interp_load_p0,
-                           vf.dest(intr->dest.ssa, i, pin_chan),
+                           vf.dest(intr->def, i, pin_chan),
                           new InlineConstant(ALU_SRC_PARAM_BASE + io.lds_pos(), i),
                           AluInstr::write);
         emit_instruction(ir);
@@ -786,8 +786,8 @@ FragmentShaderEG::process_stage_intrinsic_hw(nir_intrinsic_instr *intr)
    case nir_intrinsic_load_barycentric_pixel:
    case nir_intrinsic_load_barycentric_sample: {
       unsigned ij = barycentric_ij_index(intr);
-      vf.inject_value(intr->dest.ssa, 0, m_interpolator[ij].i);
-      vf.inject_value(intr->dest.ssa, 1, m_interpolator[ij].j);
+      vf.inject_value(intr->def, 0, m_interpolator[ij].i);
+      vf.inject_value(intr->def, 1, m_interpolator[ij].j);
       return true;
    }
    case nir_intrinsic_load_barycentric_at_offset:
@@ -806,11 +806,11 @@ FragmentShaderEG::load_interpolated_input_hw(nir_intrinsic_instr *intr)
    ASSERTED auto param = nir_src_as_const_value(intr->src[1]);
    assert(param && "Indirect PS inputs not (yet) supported");
 
-   int dest_num_comp = intr->dest.ssa.num_components;
+   int dest_num_comp = intr->def.num_components;
    int start_comp = nir_intrinsic_component(intr);
    bool need_temp = start_comp > 0;
 
-   auto dst = need_temp ? vf.temp_vec4(pin_chan) : vf.dest_vec4(intr->dest.ssa, pin_chan);
+   auto dst = need_temp ? vf.temp_vec4(pin_chan) : vf.dest_vec4(intr->def, pin_chan);
 
    InterpolateParams params;
 
@@ -823,8 +823,8 @@ FragmentShaderEG::load_interpolated_input_hw(nir_intrinsic_instr *intr)
    if (need_temp) {
       AluInstr *ir = nullptr;
-      for (unsigned i = 0; i < intr->dest.ssa.num_components; ++i) {
-         auto real_dst = vf.dest(intr->dest.ssa, i, pin_chan);
+      for (unsigned i = 0; i < intr->def.num_components; ++i) {
+         auto real_dst = vf.dest(intr->def, i, pin_chan);
         ir = new AluInstr(op1_mov, real_dst, dst[i + start_comp], AluInstr::write);
         emit_instruction(ir);
      }
@@ -936,13 +936,13 @@ FragmentShaderEG::load_barycentric_at_sample(nir_intrinsic_instr *instr)
       op3_muladd, tmp1, grad[1], slope[2], interpolator.i, {alu_write, alu_last_instr}));
 
    emit_instruction(new AluInstr(op3_muladd,
-                                 vf.dest(instr->dest.ssa, 0, pin_none),
+                                 vf.dest(instr->def, 0, pin_none),
                                  grad[3],
                                  slope[3],
                                  tmp1,
                                  {alu_write}));
    emit_instruction(new AluInstr(op3_muladd,
-                                 vf.dest(instr->dest.ssa, 1, pin_none),
+                                 vf.dest(instr->def, 1, pin_none),
                                  grad[2],
                                  slope[3],
                                  tmp0,
@@ -987,9 +987,9 @@ FragmentShaderEG::load_barycentric_at_offset(nir_intrinsic_instr *instr)
    emit_instruction(new AluInstr(
       op3_muladd, tmp1, help[1], ofs_x, interpolator.i, {alu_write, alu_last_instr}));
    emit_instruction(new AluInstr(
-      op3_muladd, vf.dest(instr->dest.ssa, 0, pin_none), help[3], ofs_y, tmp1, {alu_write}));
+      op3_muladd, vf.dest(instr->def, 0, pin_none), help[3], ofs_y, tmp1, {alu_write}));
    emit_instruction(new AluInstr(op3_muladd,
-                                 vf.dest(instr->dest.ssa, 1, pin_none),
+                                 vf.dest(instr->def, 1, pin_none),
                                  help[2],
                                  ofs_y,
                                  tmp0,
diff --git a/src/gallium/drivers/r600/sfn/sfn_shader_gs.cpp b/src/gallium/drivers/r600/sfn/sfn_shader_gs.cpp
index 773b5b8..ff32124 100644
--- a/src/gallium/drivers/r600/sfn/sfn_shader_gs.cpp
+++ b/src/gallium/drivers/r600/sfn/sfn_shader_gs.cpp
@@ -185,9 +185,9 @@ GeometryShader::process_stage_intrinsic(nir_intrinsic_instr *intr)
    case nir_intrinsic_end_primitive:
       return emit_vertex(intr, true);
    case nir_intrinsic_load_primitive_id:
-      return emit_simple_mov(intr->dest.ssa, 0, m_primitive_id);
+      return emit_simple_mov(intr->def, 0, m_primitive_id);
    case nir_intrinsic_load_invocation_id:
-      return emit_simple_mov(intr->dest.ssa, 0, m_invocation_id);
+      return emit_simple_mov(intr->def, 0, m_invocation_id);
    case nir_intrinsic_load_per_vertex_input:
       return emit_load_per_vertex_input(intr);
    default:;
@@ -320,10 +320,10 @@ GeometryShader::store_output(nir_intrinsic_instr *instr)
 bool
 GeometryShader::emit_load_per_vertex_input(nir_intrinsic_instr *instr)
 {
-   auto dest = value_factory().dest_vec4(instr->dest.ssa, pin_group);
+   auto dest = value_factory().dest_vec4(instr->def, pin_group);
 
    RegisterVec4::Swizzle dest_swz{7, 7, 7, 7};
-   for (unsigned i = 0; i < instr->dest.ssa.num_components; ++i) {
+   for (unsigned i = 0; i < instr->def.num_components; ++i) {
       dest_swz[i] = i + nir_intrinsic_component(instr);
    }
 
diff --git a/src/gallium/drivers/r600/sfn/sfn_shader_tess.cpp b/src/gallium/drivers/r600/sfn/sfn_shader_tess.cpp
index 8bfc0a6..f37a227 100644
--- a/src/gallium/drivers/r600/sfn/sfn_shader_tess.cpp
+++ b/src/gallium/drivers/r600/sfn/sfn_shader_tess.cpp
@@ -97,13 +97,13 @@ TCSShader::process_stage_intrinsic(nir_intrinsic_instr *instr)
 {
    switch (instr->intrinsic) {
    case nir_intrinsic_load_tcs_rel_patch_id_r600:
-      return emit_simple_mov(instr->dest.ssa, 0, m_rel_patch_id);
+      return emit_simple_mov(instr->def, 0, m_rel_patch_id);
    case nir_intrinsic_load_invocation_id:
-      return emit_simple_mov(instr->dest.ssa, 0, m_invocation_id);
+      return emit_simple_mov(instr->def, 0, m_invocation_id);
    case nir_intrinsic_load_primitive_id:
-      return emit_simple_mov(instr->dest.ssa, 0, m_primitive_id);
+      return emit_simple_mov(instr->def, 0, m_primitive_id);
    case nir_intrinsic_load_tcs_tess_factor_base_r600:
-      return emit_simple_mov(instr->dest.ssa, 0, m_tess_factor_base);
+      return emit_simple_mov(instr->def, 0, m_tess_factor_base);
    case nir_intrinsic_store_tf_r600:
       return store_tess_factor(instr);
    default:
@@ -246,12 +246,12 @@ TESShader::process_stage_intrinsic(nir_intrinsic_instr *intr)
 {
    switch (intr->intrinsic) {
    case nir_intrinsic_load_tess_coord_xy:
-      return emit_simple_mov(intr->dest.ssa, 0, m_tess_coord[0], pin_none) &&
-             emit_simple_mov(intr->dest.ssa, 1, m_tess_coord[1], pin_none);
+      return emit_simple_mov(intr->def, 0, m_tess_coord[0], pin_none) &&
+             emit_simple_mov(intr->def, 1, m_tess_coord[1], pin_none);
    case nir_intrinsic_load_primitive_id:
-      return emit_simple_mov(intr->dest.ssa, 0, m_primitive_id);
+      return emit_simple_mov(intr->def, 0, m_primitive_id);
    case nir_intrinsic_load_tcs_rel_patch_id_r600:
-      return emit_simple_mov(intr->dest.ssa, 0, m_rel_patch_id);
+      return emit_simple_mov(intr->def, 0, m_rel_patch_id);
    case nir_intrinsic_store_output:
       return m_export_processor->store_output(*intr);
    default:
emit_simple_mov(instr->def, 0, m_invocation_id); case nir_intrinsic_load_primitive_id: - return emit_simple_mov(instr->dest.ssa, 0, m_primitive_id); + return emit_simple_mov(instr->def, 0, m_primitive_id); case nir_intrinsic_load_tcs_tess_factor_base_r600: - return emit_simple_mov(instr->dest.ssa, 0, m_tess_factor_base); + return emit_simple_mov(instr->def, 0, m_tess_factor_base); case nir_intrinsic_store_tf_r600: return store_tess_factor(instr); default: @@ -246,12 +246,12 @@ TESShader::process_stage_intrinsic(nir_intrinsic_instr *intr) { switch (intr->intrinsic) { case nir_intrinsic_load_tess_coord_xy: - return emit_simple_mov(intr->dest.ssa, 0, m_tess_coord[0], pin_none) && - emit_simple_mov(intr->dest.ssa, 1, m_tess_coord[1], pin_none); + return emit_simple_mov(intr->def, 0, m_tess_coord[0], pin_none) && + emit_simple_mov(intr->def, 1, m_tess_coord[1], pin_none); case nir_intrinsic_load_primitive_id: - return emit_simple_mov(intr->dest.ssa, 0, m_primitive_id); + return emit_simple_mov(intr->def, 0, m_primitive_id); case nir_intrinsic_load_tcs_rel_patch_id_r600: - return emit_simple_mov(intr->dest.ssa, 0, m_rel_patch_id); + return emit_simple_mov(intr->def, 0, m_rel_patch_id); case nir_intrinsic_store_output: return m_export_processor->store_output(*intr); default: diff --git a/src/gallium/drivers/r600/sfn/sfn_shader_vs.cpp b/src/gallium/drivers/r600/sfn/sfn_shader_vs.cpp index 0004db1..de76689 100644 --- a/src/gallium/drivers/r600/sfn/sfn_shader_vs.cpp +++ b/src/gallium/drivers/r600/sfn/sfn_shader_vs.cpp @@ -502,10 +502,10 @@ VertexShader::load_input(nir_intrinsic_instr *intr) AluInstr *ir = nullptr; if (location < VERT_ATTRIB_MAX) { - for (unsigned i = 0; i < intr->dest.ssa.num_components; ++i) { + for (unsigned i = 0; i < intr->def.num_components; ++i) { auto src = vf.allocate_pinned_register(driver_location + 1, i); src->set_flag(Register::ssa); - vf.inject_value(intr->dest.ssa, i, src); + vf.inject_value(intr->def, i, src); } if (ir) ir->set_alu_flag(alu_last_instr); @@ -553,13 +553,13 @@ VertexShader::process_stage_intrinsic(nir_intrinsic_instr *intr) { switch (intr->intrinsic) { case nir_intrinsic_load_vertex_id: - return emit_simple_mov(intr->dest.ssa, 0, m_vertex_id); + return emit_simple_mov(intr->def, 0, m_vertex_id); case nir_intrinsic_load_instance_id: - return emit_simple_mov(intr->dest.ssa, 0, m_instance_id); + return emit_simple_mov(intr->def, 0, m_instance_id); case nir_intrinsic_load_primitive_id: - return emit_simple_mov(intr->dest.ssa, 0, primitive_id()); + return emit_simple_mov(intr->def, 0, primitive_id()); case nir_intrinsic_load_tcs_rel_patch_id_r600: - return emit_simple_mov(intr->dest.ssa, 0, m_rel_vertex_id); + return emit_simple_mov(intr->def, 0, m_rel_vertex_id); default: return false; } diff --git a/src/gallium/drivers/r600/sfn/sfn_valuefactory.cpp b/src/gallium/drivers/r600/sfn/sfn_valuefactory.cpp index 8965a81..e68a94f 100644 --- a/src/gallium/drivers/r600/sfn/sfn_valuefactory.cpp +++ b/src/gallium/drivers/r600/sfn/sfn_valuefactory.cpp @@ -79,12 +79,12 @@ ValueFactory::allocate_registers(const std::list& regs) if (num_elms > 0 || num_comp > 1 || bit_size > 32) { array_entry ae = { - intr->dest.ssa.index, + intr->def.index, num_elms ? 
num_elms : 1, bit_size / 32 * num_comp}; arrays.push(ae); } else { - non_array.push_back(intr->dest.ssa.index); + non_array.push_back(intr->def.index); } } diff --git a/src/gallium/drivers/radeonsi/si_nir_lower_abi.c b/src/gallium/drivers/radeonsi/si_nir_lower_abi.c index c97a4ba..010df9e 100644 --- a/src/gallium/drivers/radeonsi/si_nir_lower_abi.c +++ b/src/gallium/drivers/radeonsi/si_nir_lower_abi.c @@ -350,7 +350,7 @@ static bool lower_intrinsic(nir_builder *b, nir_instr *instr, struct lower_abi_s case nir_intrinsic_load_tess_level_outer_default: case nir_intrinsic_load_tess_level_inner_default: { nir_def *buf = si_nir_load_internal_binding(b, args, SI_HS_CONST_DEFAULT_TESS_LEVELS, 4); - unsigned num_components = intrin->dest.ssa.num_components; + unsigned num_components = intrin->def.num_components; unsigned offset = intrin->intrinsic == nir_intrinsic_load_tess_level_inner_default ? 16 : 0; replacement = nir_load_ubo(b, num_components, 32, buf, nir_imm_int(b, offset), @@ -709,7 +709,7 @@ static bool lower_intrinsic(nir_builder *b, nir_instr *instr, struct lower_abi_s } if (replacement) - nir_def_rewrite_uses(&intrin->dest.ssa, replacement); + nir_def_rewrite_uses(&intrin->def, replacement); nir_instr_remove(instr); nir_instr_free(instr); diff --git a/src/gallium/drivers/radeonsi/si_nir_lower_resource.c b/src/gallium/drivers/radeonsi/si_nir_lower_resource.c index 66c000f..b63839e 100644 --- a/src/gallium/drivers/radeonsi/si_nir_lower_resource.c +++ b/src/gallium/drivers/radeonsi/si_nir_lower_resource.c @@ -291,7 +291,7 @@ static bool lower_resource_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin nir_def *desc = load_ssbo_desc(b, &intrin->src[0], s); nir_def *size = nir_channel(b, desc, 2); - nir_def_rewrite_uses(&intrin->dest.ssa, size); + nir_def_rewrite_uses(&intrin->def, size); nir_instr_remove(&intrin->instr); break; } @@ -323,7 +323,7 @@ static bool lower_resource_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin nir_def *desc = load_deref_image_desc(b, deref, desc_type, is_load, s); if (intrin->intrinsic == nir_intrinsic_image_deref_descriptor_amd) { - nir_def_rewrite_uses(&intrin->dest.ssa, desc); + nir_def_rewrite_uses(&intrin->def, desc); nir_instr_remove(&intrin->instr); } else { nir_intrinsic_set_image_dim(intrin, glsl_get_sampler_dim(deref->type)); @@ -359,7 +359,7 @@ static bool lower_resource_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin nir_def *desc = load_bindless_image_desc(b, index, desc_type, is_load, s); if (intrin->intrinsic == nir_intrinsic_bindless_image_descriptor_amd) { - nir_def_rewrite_uses(&intrin->dest.ssa, desc); + nir_def_rewrite_uses(&intrin->def, desc); nir_instr_remove(&intrin->instr); } else { nir_instr_rewrite_src(&intrin->instr, &intrin->src[0], nir_src_for_ssa(desc)); @@ -498,7 +498,7 @@ static bool lower_resource_tex(nir_builder *b, nir_tex_instr *tex, image = load_deref_sampler_desc(b, texture_deref, desc_type, s, true); else image = load_bindless_sampler_desc(b, texture_handle, desc_type, s); - nir_def_rewrite_uses(&tex->dest.ssa, image); + nir_def_rewrite_uses(&tex->def, image); nir_instr_remove(&tex->instr); return true; } @@ -509,7 +509,7 @@ static bool lower_resource_tex(nir_builder *b, nir_tex_instr *tex, sampler = load_deref_sampler_desc(b, sampler_deref, AC_DESC_SAMPLER, s, true); else sampler = load_bindless_sampler_desc(b, sampler_handle, AC_DESC_SAMPLER, s); - nir_def_rewrite_uses(&tex->dest.ssa, sampler); + nir_def_rewrite_uses(&tex->def, sampler); nir_instr_remove(&tex->instr); return true; } diff --git 
a/src/gallium/drivers/radeonsi/si_nir_lower_vs_inputs.c b/src/gallium/drivers/radeonsi/si_nir_lower_vs_inputs.c index c82da81..50e75b4 100644 --- a/src/gallium/drivers/radeonsi/si_nir_lower_vs_inputs.c +++ b/src/gallium/drivers/radeonsi/si_nir_lower_vs_inputs.c @@ -581,17 +581,17 @@ lower_vs_input_instr(nir_builder *b, nir_instr *instr, void *state) unsigned input_index = nir_intrinsic_base(intrin); unsigned component = nir_intrinsic_component(intrin); - unsigned num_components = intrin->dest.ssa.num_components; + unsigned num_components = intrin->def.num_components; nir_def *comp[4]; if (s->shader->selector->info.base.vs.blit_sgprs_amd) load_vs_input_from_blit_sgpr(b, input_index, s, comp); else - load_vs_input_from_vertex_buffer(b, input_index, s, intrin->dest.ssa.bit_size, comp); + load_vs_input_from_vertex_buffer(b, input_index, s, intrin->def.bit_size, comp); nir_def *replacement = nir_vec(b, &comp[component], num_components); - nir_def_rewrite_uses(&intrin->dest.ssa, replacement); + nir_def_rewrite_uses(&intrin->def, replacement); nir_instr_remove(instr); nir_instr_free(instr); diff --git a/src/gallium/drivers/radeonsi/si_nir_optim.c b/src/gallium/drivers/radeonsi/si_nir_optim.c index 7a76a00..13bdbd7 100644 --- a/src/gallium/drivers/radeonsi/si_nir_optim.c +++ b/src/gallium/drivers/radeonsi/si_nir_optim.c @@ -136,7 +136,7 @@ replace_tex_by_imm(nir_builder *b, nir_instr *instr, void *state) b->cursor = nir_instr_remove(&tex->instr); nir_def *imm = nir_imm_vec4(b, p->value[0], p->value[1], p->value[2], p->value[3]); - nir_def_rewrite_uses(&tex->dest.ssa, imm); + nir_def_rewrite_uses(&tex->def, imm); return true; } diff --git a/src/gallium/drivers/radeonsi/si_shader.c b/src/gallium/drivers/radeonsi/si_shader.c index 2acf1e9..e624f59 100644 --- a/src/gallium/drivers/radeonsi/si_shader.c +++ b/src/gallium/drivers/radeonsi/si_shader.c @@ -1917,7 +1917,7 @@ static bool lower_ps_load_color_intrinsic(nir_builder *b, nir_instr *instr, void unsigned index = intrin->intrinsic == nir_intrinsic_load_color0 ? 
0 : 1; assert(colors[index]); - nir_def_rewrite_uses(&intrin->dest.ssa, colors[index]); + nir_def_rewrite_uses(&intrin->def, colors[index]); nir_instr_remove(&intrin->instr); return true; diff --git a/src/gallium/drivers/radeonsi/si_shader_info.c b/src/gallium/drivers/radeonsi/si_shader_info.c index 8e53860..534612a 100644 --- a/src/gallium/drivers/radeonsi/si_shader_info.c +++ b/src/gallium/drivers/radeonsi/si_shader_info.c @@ -230,8 +230,8 @@ static void scan_io_usage(const nir_shader *nir, struct si_shader_info *info, bit_size = nir_src_bit_size(intr->src[0]); is_output_load = false; } else { - mask = nir_def_components_read(&intr->dest.ssa); /* load */ - bit_size = intr->dest.ssa.bit_size; + mask = nir_def_components_read(&intr->def); /* load */ + bit_size = intr->def.bit_size; is_output_load = !is_input; } assert(bit_size != 64 && !(mask & ~0xf) && "64-bit IO should have been lowered"); @@ -478,7 +478,7 @@ static void scan_instruction(const struct nir_shader *nir, struct si_shader_info break; case nir_intrinsic_load_local_invocation_id: case nir_intrinsic_load_workgroup_id: { - unsigned mask = nir_def_components_read(&intr->dest.ssa); + unsigned mask = nir_def_components_read(&intr->def); while (mask) { unsigned i = u_bit_scan(&mask); @@ -492,7 +492,7 @@ static void scan_instruction(const struct nir_shader *nir, struct si_shader_info case nir_intrinsic_load_color0: case nir_intrinsic_load_color1: { unsigned index = intr->intrinsic == nir_intrinsic_load_color1; - uint8_t mask = nir_def_components_read(&intr->dest.ssa); + uint8_t mask = nir_def_components_read(&intr->def); info->colors_read |= mask << (index * 4); switch (info->color_interpolate[index]) { @@ -541,10 +541,10 @@ static void scan_instruction(const struct nir_shader *nir, struct si_shader_info info->uses_interp_at_sample = true; break; case nir_intrinsic_load_frag_coord: - info->reads_frag_coord_mask |= nir_def_components_read(&intr->dest.ssa); + info->reads_frag_coord_mask |= nir_def_components_read(&intr->def); break; case nir_intrinsic_load_sample_pos: - info->reads_sample_pos_mask |= nir_def_components_read(&intr->dest.ssa); + info->reads_sample_pos_mask |= nir_def_components_read(&intr->def); break; case nir_intrinsic_load_input: case nir_intrinsic_load_per_vertex_input: diff --git a/src/gallium/drivers/radeonsi/si_shader_nir.c b/src/gallium/drivers/radeonsi/si_shader_nir.c index 7ac7775..11d5ee4 100644 --- a/src/gallium/drivers/radeonsi/si_shader_nir.c +++ b/src/gallium/drivers/radeonsi/si_shader_nir.c @@ -402,7 +402,7 @@ static bool si_mark_divergent_texture_non_uniform(struct nir_shader *nir) } /* If dest is already divergent, divergence won't change. */ - divergence_changed |= !tex->dest.ssa.divergent && + divergence_changed |= !tex->def.divergent && (tex->texture_non_uniform || tex->sampler_non_uniform); } } diff --git a/src/gallium/drivers/radeonsi/si_shaderlib_nir.c b/src/gallium/drivers/radeonsi/si_shaderlib_nir.c index 7413728..ca2f631 100644 --- a/src/gallium/drivers/radeonsi/si_shaderlib_nir.c +++ b/src/gallium/drivers/radeonsi/si_shaderlib_nir.c @@ -64,7 +64,7 @@ static void unpack_2x16_signed(nir_builder *b, nir_def *src, nir_def **x, nir_de static nir_def * deref_ssa(nir_builder *b, nir_variable *var) { - return &nir_build_deref_var(b, var)->dest.ssa; + return &nir_build_deref_var(b, var)->def; } /* Create a NIR compute shader implementing copy_image. 
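All of the driver changes above reduce to the same consumer-side rewrite: a lowering callback that used to reach through intrin->dest.ssa for the value's shape now reads the embedded nir_def directly, and rewrites uses of that def when it emits a replacement. A minimal sketch of the post-change shape, assuming a hypothetical lower_foo pass (the pass name and the zero replacement are illustrative, not from this patch; the API calls are the ones used throughout the hunks):

static bool
lower_foo(nir_builder *b, nir_intrinsic_instr *intrin, void *state)
{
   b->cursor = nir_before_instr(&intrin->instr);

   /* Shape the replacement from the embedded def (formerly dest.ssa). */
   nir_def *replacement =
      nir_imm_zero(b, intrin->def.num_components, intrin->def.bit_size);

   /* Point every use of the old destination at the replacement, then
    * drop the original instruction.
    */
   nir_def_rewrite_uses(&intrin->def, replacement);
   nir_instr_remove(&intrin->instr);
   return true;
}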
diff --git a/src/gallium/drivers/vc4/vc4_nir_lower_io.c b/src/gallium/drivers/vc4/vc4_nir_lower_io.c index 9dfef1d..f6d3824 100644 --- a/src/gallium/drivers/vc4/vc4_nir_lower_io.c +++ b/src/gallium/drivers/vc4/vc4_nir_lower_io.c @@ -49,7 +49,7 @@ replace_intrinsic_with_vec(nir_builder *b, nir_intrinsic_instr *intr, /* Replace the old intrinsic with a reference to our reconstructed * vector. */ - nir_def_rewrite_uses(&intr->dest.ssa, vec); + nir_def_rewrite_uses(&intr->def, vec); nir_instr_remove(&intr->instr); } @@ -239,7 +239,7 @@ vc4_nir_lower_fs_input(struct vc4_compile *c, nir_builder *b, c->fs_key->point_sprite_mask)) { assert(intr->num_components == 1); - nir_def *result = &intr->dest.ssa; + nir_def *result = &intr->def; switch (comp) { case 0: @@ -262,8 +262,8 @@ vc4_nir_lower_fs_input(struct vc4_compile *c, nir_builder *b, if (c->fs_key->point_coord_upper_left && comp == 1) result = nir_fsub_imm(b, 1.0, result); - if (result != &intr->dest.ssa) { - nir_def_rewrite_uses_after(&intr->dest.ssa, + if (result != &intr->def) { + nir_def_rewrite_uses_after(&intr->def, result, result->parent_instr); } @@ -299,8 +299,8 @@ vc4_nir_lower_uniform(struct vc4_compile *c, nir_builder *b, nir_intrinsic_instr *intr_comp = nir_intrinsic_instr_create(c->s, intr->intrinsic); intr_comp->num_components = 1; - nir_def_init(&intr_comp->instr, &intr_comp->dest.ssa, 1, - intr->dest.ssa.bit_size); + nir_def_init(&intr_comp->instr, &intr_comp->def, 1, + intr->def.bit_size); /* Convert the uniform offset to bytes. If it happens * to be a constant, constant-folding will clean up @@ -315,7 +315,7 @@ vc4_nir_lower_uniform(struct vc4_compile *c, nir_builder *b, intr_comp->src[0] = nir_src_for_ssa(nir_ishl_imm(b, intr->src[0].ssa, 4)); - dests[i] = &intr_comp->dest.ssa; + dests[i] = &intr_comp->def; nir_builder_instr_insert(b, &intr_comp->instr); } diff --git a/src/gallium/drivers/vc4/vc4_nir_lower_txf_ms.c b/src/gallium/drivers/vc4/vc4_nir_lower_txf_ms.c index 2b10d5a..56d4929 100644 --- a/src/gallium/drivers/vc4/vc4_nir_lower_txf_ms.c +++ b/src/gallium/drivers/vc4/vc4_nir_lower_txf_ms.c @@ -109,10 +109,10 @@ vc4_nir_lower_txf_ms_instr(nir_builder *b, nir_instr *instr, void *data) txf->src[0] = nir_tex_src_for_ssa(nir_tex_src_coord, nir_vec2(b, addr, nir_imm_int(b, 0))); - nir_def_init(&txf->instr, &txf->dest.ssa, 4, 32); + nir_def_init(&txf->instr, &txf->def, 4, 32); nir_builder_instr_insert(b, &txf->instr); - return &txf->dest.ssa; + return &txf->def; } static bool diff --git a/src/gallium/drivers/vc4/vc4_program.c b/src/gallium/drivers/vc4/vc4_program.c index 1e3441f..754130a 100644 --- a/src/gallium/drivers/vc4/vc4_program.c +++ b/src/gallium/drivers/vc4/vc4_program.c @@ -387,10 +387,10 @@ ntq_emit_txf(struct vc4_compile *c, nir_tex_instr *instr) if (util_format_is_depth_or_stencil(format)) { struct qreg scaled = ntq_scale_depth_texture(c, tex); for (int i = 0; i < 4; i++) - ntq_store_def(c, &instr->dest.ssa, i, qir_MOV(c, scaled)); + ntq_store_def(c, &instr->def, i, qir_MOV(c, scaled)); } else { for (int i = 0; i < 4; i++) - ntq_store_def(c, &instr->dest.ssa, i, + ntq_store_def(c, &instr->def, i, qir_UNPACK_8_F(c, tex, i)); } } @@ -561,11 +561,11 @@ ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr) } for (int i = 0; i < 4; i++) - ntq_store_def(c, &instr->dest.ssa, i, + ntq_store_def(c, &instr->def, i, qir_MOV(c, depth_output)); } else { for (int i = 0; i < 4; i++) - ntq_store_def(c, &instr->dest.ssa, i, + ntq_store_def(c, &instr->def, i, qir_UNPACK_8_F(c, tex, i)); } } @@ -1643,7 +1643,7 @@ 
ntq_setup_registers(struct vc4_compile *c, nir_function_impl *impl) struct qreg *qregs = ralloc_array(c->def_ht, struct qreg, array_len * num_components); - nir_def *nir_reg = &decl->dest.ssa; + nir_def *nir_reg = &decl->def; _mesa_hash_table_insert(c->def_ht, nir_reg, qregs); for (int i = 0; i < array_len * num_components; i++) @@ -1689,7 +1689,7 @@ ntq_emit_color_read(struct vc4_compile *c, nir_intrinsic_instr *instr) qir_TLB_COLOR_READ(c); } } - ntq_store_def(c, &instr->dest.ssa, 0, + ntq_store_def(c, &instr->def, 0, qir_MOV(c, c->color_reads[sample_index])); } @@ -1709,7 +1709,7 @@ ntq_emit_load_input(struct vc4_compile *c, nir_intrinsic_instr *instr) uint32_t offset = nir_intrinsic_base(instr) + nir_src_as_uint(instr->src[0]); int comp = nir_intrinsic_component(instr); - ntq_store_def(c, &instr->dest.ssa, 0, + ntq_store_def(c, &instr->def, 0, qir_MOV(c, c->inputs[offset * 4 + comp])); } @@ -1732,23 +1732,23 @@ ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr) assert(offset % 4 == 0); /* We need dwords */ offset = offset / 4; - ntq_store_def(c, &instr->dest.ssa, 0, + ntq_store_def(c, &instr->def, 0, qir_uniform(c, QUNIFORM_UNIFORM, offset)); } else { - ntq_store_def(c, &instr->dest.ssa, 0, + ntq_store_def(c, &instr->def, 0, indirect_uniform_load(c, instr)); } break; case nir_intrinsic_load_ubo: assert(instr->num_components == 1); - ntq_store_def(c, &instr->dest.ssa, 0, vc4_ubo_load(c, instr)); + ntq_store_def(c, &instr->def, 0, vc4_ubo_load(c, instr)); break; case nir_intrinsic_load_user_clip_plane: for (int i = 0; i < nir_intrinsic_dest_components(instr); i++) { - ntq_store_def(c, &instr->dest.ssa, i, + ntq_store_def(c, &instr->def, i, qir_uniform(c, QUNIFORM_USER_CLIP_PLANE, nir_intrinsic_ucp_id(instr) * 4 + i)); @@ -1759,7 +1759,7 @@ ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr) case nir_intrinsic_load_blend_const_color_g_float: case nir_intrinsic_load_blend_const_color_b_float: case nir_intrinsic_load_blend_const_color_a_float: - ntq_store_def(c, &instr->dest.ssa, 0, + ntq_store_def(c, &instr->def, 0, qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR_X + (instr->intrinsic - nir_intrinsic_load_blend_const_color_r_float), @@ -1767,19 +1767,19 @@ ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr) break; case nir_intrinsic_load_blend_const_color_rgba8888_unorm: - ntq_store_def(c, &instr->dest.ssa, 0, + ntq_store_def(c, &instr->def, 0, qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR_RGBA, 0)); break; case nir_intrinsic_load_blend_const_color_aaaa8888_unorm: - ntq_store_def(c, &instr->dest.ssa, 0, + ntq_store_def(c, &instr->def, 0, qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR_AAAA, 0)); break; case nir_intrinsic_load_sample_mask_in: - ntq_store_def(c, &instr->dest.ssa, 0, + ntq_store_def(c, &instr->def, 0, qir_uniform(c, QUNIFORM_SAMPLE_MASK, 0)); break; @@ -1787,7 +1787,7 @@ ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr) /* The register contains 0 (front) or 1 (back), and we need to * turn it into a NIR bool where true means front. 
*/ - ntq_store_def(c, &instr->dest.ssa, 0, + ntq_store_def(c, &instr->def, 0, qir_ADD(c, qir_uniform_ui(c, -1), qir_reg(QFILE_FRAG_REV_FLAG, 0))); @@ -1856,9 +1856,9 @@ ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr) assert(nir_src_is_const(instr->src[0])); int sampler = nir_src_as_int(instr->src[0]); - ntq_store_def(c, &instr->dest.ssa, 0, + ntq_store_def(c, &instr->def, 0, qir_uniform(c, QUNIFORM_TEXRECT_SCALE_X, sampler)); - ntq_store_def(c, &instr->dest.ssa, 1, + ntq_store_def(c, &instr->def, 1, qir_uniform(c, QUNIFORM_TEXRECT_SCALE_Y, sampler)); break; } diff --git a/src/gallium/drivers/zink/nir_to_spirv/nir_to_spirv.c b/src/gallium/drivers/zink/nir_to_spirv/nir_to_spirv.c index 700a057..303fa84 100644 --- a/src/gallium/drivers/zink/nir_to_spirv/nir_to_spirv.c +++ b/src/gallium/drivers/zink/nir_to_spirv/nir_to_spirv.c @@ -1380,7 +1380,7 @@ get_src_ssa(struct ntv_context *ctx, const nir_def *ssa, nir_alu_type *atype) static void init_reg(struct ntv_context *ctx, nir_intrinsic_instr *decl, nir_alu_type atype) { - unsigned index = decl->dest.ssa.index; + unsigned index = decl->def.index; unsigned num_components = nir_intrinsic_num_components(decl); unsigned bit_size = nir_intrinsic_bit_size(decl); @@ -2646,7 +2646,7 @@ emit_load_deref(struct ntv_context *ctx, nir_intrinsic_instr *intr) result = emit_atomic(ctx, SpvOpAtomicLoad, type, ptr, 0, 0); else result = spirv_builder_emit_load(&ctx->builder, type, ptr); - store_def(ctx, &intr->dest.ssa, result, atype); + store_def(ctx, &intr->def, result, atype); } static void @@ -2711,9 +2711,9 @@ emit_store_deref(struct ntv_context *ctx, nir_intrinsic_instr *intr) static void emit_load_shared(struct ntv_context *ctx, nir_intrinsic_instr *intr) { - SpvId dest_type = get_def_type(ctx, &intr->dest.ssa, nir_type_uint); - unsigned num_components = intr->dest.ssa.num_components; - unsigned bit_size = intr->dest.ssa.bit_size; + SpvId dest_type = get_def_type(ctx, &intr->def, nir_type_uint); + unsigned num_components = intr->def.num_components; + unsigned bit_size = intr->def.bit_size; SpvId uint_type = get_uvec_type(ctx, bit_size, 1); SpvId ptr_type = spirv_builder_type_pointer(&ctx->builder, SpvStorageClassWorkgroup, @@ -2736,7 +2736,7 @@ emit_load_shared(struct ntv_context *ctx, nir_intrinsic_instr *intr) result = spirv_builder_emit_composite_construct(&ctx->builder, dest_type, constituents, num_components); else result = constituents[0]; - store_def(ctx, &intr->dest.ssa, result, nir_type_uint); + store_def(ctx, &intr->def, result, nir_type_uint); } static void @@ -2773,9 +2773,9 @@ emit_store_shared(struct ntv_context *ctx, nir_intrinsic_instr *intr) static void emit_load_scratch(struct ntv_context *ctx, nir_intrinsic_instr *intr) { - SpvId dest_type = get_def_type(ctx, &intr->dest.ssa, nir_type_uint); - unsigned num_components = intr->dest.ssa.num_components; - unsigned bit_size = intr->dest.ssa.bit_size; + SpvId dest_type = get_def_type(ctx, &intr->def, nir_type_uint); + unsigned num_components = intr->def.num_components; + unsigned bit_size = intr->def.bit_size; SpvId uint_type = get_uvec_type(ctx, bit_size, 1); SpvId ptr_type = spirv_builder_type_pointer(&ctx->builder, SpvStorageClassPrivate, @@ -2798,7 +2798,7 @@ emit_load_scratch(struct ntv_context *ctx, nir_intrinsic_instr *intr) result = spirv_builder_emit_composite_construct(&ctx->builder, dest_type, constituents, num_components); else result = constituents[0]; - store_def(ctx, &intr->dest.ssa, result, nir_type_uint); + store_def(ctx, &intr->def, result, nir_type_uint); 
} static void @@ -2839,12 +2839,12 @@ emit_load_push_const(struct ntv_context *ctx, nir_intrinsic_instr *intr) SpvId load_type = get_uvec_type(ctx, 32, 1); /* number of components being loaded */ - unsigned num_components = intr->dest.ssa.num_components; + unsigned num_components = intr->def.num_components; SpvId constituents[NIR_MAX_VEC_COMPONENTS * 2]; SpvId result; /* destination type for the load */ - SpvId type = get_def_uvec_type(ctx, &intr->dest.ssa); + SpvId type = get_def_uvec_type(ctx, &intr->def); SpvId one = emit_uint_const(ctx, 32, 1); /* we grab a single array member at a time, so it's a pointer to a uint */ @@ -2885,21 +2885,21 @@ emit_load_push_const(struct ntv_context *ctx, nir_intrinsic_instr *intr) } else result = constituents[0]; - store_def(ctx, &intr->dest.ssa, result, nir_type_uint); + store_def(ctx, &intr->def, result, nir_type_uint); } static void emit_load_global(struct ntv_context *ctx, nir_intrinsic_instr *intr) { spirv_builder_emit_cap(&ctx->builder, SpvCapabilityPhysicalStorageBufferAddresses); - SpvId dest_type = get_def_type(ctx, &intr->dest.ssa, nir_type_uint); + SpvId dest_type = get_def_type(ctx, &intr->def, nir_type_uint); SpvId pointer_type = spirv_builder_type_pointer(&ctx->builder, SpvStorageClassPhysicalStorageBuffer, dest_type); nir_alu_type atype; SpvId ptr = emit_bitcast(ctx, pointer_type, get_src(ctx, &intr->src[0], &atype)); SpvId result = spirv_builder_emit_load(&ctx->builder, dest_type, ptr); - store_def(ctx, &intr->dest.ssa, result, nir_type_uint); + store_def(ctx, &intr->def, result, nir_type_uint); } static void @@ -2925,7 +2925,7 @@ emit_load_reg(struct ntv_context *ctx, nir_intrinsic_instr *intr) nir_intrinsic_instr *decl = nir_reg_get_decl(intr->src[0].ssa); unsigned num_components = nir_intrinsic_num_components(decl); unsigned bit_size = nir_intrinsic_bit_size(decl); - unsigned index = decl->dest.ssa.index; + unsigned index = decl->def.index; assert(index < ctx->num_defs); init_reg(ctx, decl, nir_type_uint); @@ -2935,7 +2935,7 @@ emit_load_reg(struct ntv_context *ctx, nir_intrinsic_instr *intr) SpvId var = ctx->defs[index]; SpvId type = get_alu_type(ctx, atype, num_components, bit_size); SpvId result = spirv_builder_emit_load(&ctx->builder, type, var); - store_def(ctx, &intr->dest.ssa, result, atype); + store_def(ctx, &intr->def, result, atype); } static void @@ -2945,7 +2945,7 @@ emit_store_reg(struct ntv_context *ctx, nir_intrinsic_instr *intr) SpvId param = get_src(ctx, &intr->src[0], &atype); nir_intrinsic_instr *decl = nir_reg_get_decl(intr->src[1].ssa); - unsigned index = decl->dest.ssa.index; + unsigned index = decl->def.index; unsigned num_components = nir_intrinsic_num_components(decl); unsigned bit_size = nir_intrinsic_bit_size(decl); @@ -3002,8 +3002,8 @@ emit_load_front_face(struct ntv_context *ctx, nir_intrinsic_instr *intr) SpvId result = spirv_builder_emit_load(&ctx->builder, var_type, ctx->front_face_var); - assert(1 == intr->dest.ssa.num_components); - store_def(ctx, &intr->dest.ssa, result, nir_type_bool); + assert(1 == intr->def.num_components); + store_def(ctx, &intr->def, result, nir_type_bool); } static void @@ -3033,8 +3033,8 @@ emit_load_uint_input(struct ntv_context *ctx, nir_intrinsic_instr *intr, SpvId * } SpvId result = spirv_builder_emit_load(&ctx->builder, var_type, load_var); - assert(1 == intr->dest.ssa.num_components); - store_def(ctx, &intr->dest.ssa, result, nir_type_uint); + assert(1 == intr->def.num_components); + store_def(ctx, &intr->def, result, nir_type_uint); } static void @@ -3044,19 +3044,19 
@@ emit_load_vec_input(struct ntv_context *ctx, nir_intrinsic_instr *intr, SpvId *v switch (type) { case nir_type_bool: - var_type = get_bvec_type(ctx, intr->dest.ssa.num_components); + var_type = get_bvec_type(ctx, intr->def.num_components); break; case nir_type_int: - var_type = get_ivec_type(ctx, intr->dest.ssa.bit_size, - intr->dest.ssa.num_components); + var_type = get_ivec_type(ctx, intr->def.bit_size, + intr->def.num_components); break; case nir_type_uint: - var_type = get_uvec_type(ctx, intr->dest.ssa.bit_size, - intr->dest.ssa.num_components); + var_type = get_uvec_type(ctx, intr->def.bit_size, + intr->def.num_components); break; case nir_type_float: - var_type = get_fvec_type(ctx, intr->dest.ssa.bit_size, - intr->dest.ssa.num_components); + var_type = get_fvec_type(ctx, intr->def.bit_size, + intr->def.num_components); break; default: unreachable("unknown type passed"); @@ -3068,7 +3068,7 @@ emit_load_vec_input(struct ntv_context *ctx, nir_intrinsic_instr *intr, SpvId *v builtin); SpvId result = spirv_builder_emit_load(&ctx->builder, var_type, *var_id); - store_def(ctx, &intr->dest.ssa, result, type); + store_def(ctx, &intr->def, result, type); } static void @@ -3108,18 +3108,18 @@ emit_interpolate(struct ntv_context *ctx, nir_intrinsic_instr *intr) result = emit_builtin_unop(ctx, op, get_glsl_type(ctx, gtype), ptr); else result = emit_builtin_binop(ctx, op, get_glsl_type(ctx, gtype), ptr, src1); - store_def(ctx, &intr->dest.ssa, result, ptype); + store_def(ctx, &intr->def, result, ptype); } static void handle_atomic_op(struct ntv_context *ctx, nir_intrinsic_instr *intr, SpvId ptr, SpvId param, SpvId param2, nir_alu_type type) { - SpvId dest_type = get_def_type(ctx, &intr->dest.ssa, type); + SpvId dest_type = get_def_type(ctx, &intr->def, type); SpvId result = emit_atomic(ctx, - get_atomic_op(ctx, intr->dest.ssa.bit_size, nir_intrinsic_atomic_op(intr)), + get_atomic_op(ctx, intr->def.bit_size, nir_intrinsic_atomic_op(intr)), dest_type, ptr, param, param2); assert(result); - store_def(ctx, &intr->dest.ssa, result, type); + store_def(ctx, &intr->def, result, type); } static void @@ -3150,7 +3150,7 @@ static void emit_shared_atomic_intrinsic(struct ntv_context *ctx, nir_intrinsic_instr *intr) { unsigned bit_size = nir_src_bit_size(intr->src[1]); - SpvId dest_type = get_def_type(ctx, &intr->dest.ssa, nir_type_uint); + SpvId dest_type = get_def_type(ctx, &intr->def, nir_type_uint); nir_alu_type atype; nir_alu_type ret_type = nir_atomic_op_type(nir_intrinsic_atomic_op(intr)) == nir_type_float ? nir_type_float : nir_type_uint; SpvId param = get_src(ctx, &intr->src[1], &atype); @@ -3213,7 +3213,7 @@ emit_get_ssbo_size(struct ntv_context *ctx, nir_intrinsic_instr *intr) result = emit_binop(ctx, SpvOpIAdd, uint_type, result, emit_uint_const(ctx, 32, glsl_get_struct_field_offset(bare_type, last_member_idx))); - store_def(ctx, &intr->dest.ssa, result, nir_type_uint); + store_def(ctx, &intr->def, result, nir_type_uint); } static SpvId @@ -3265,7 +3265,7 @@ emit_image_deref_store(struct ntv_context *ctx, nir_intrinsic_instr *intr) } static SpvId -extract_sparse_load(struct ntv_context *ctx, SpvId result, SpvId dest_type, nir_def *dest_ssa) +extract_sparse_load(struct ntv_context *ctx, SpvId result, SpvId dest_type, nir_def *def) { /* Result Type must be an OpTypeStruct with two members. * The first member’s type must be an integer type scalar. 
@@ -3276,24 +3276,24 @@ extract_sparse_load(struct ntv_context *ctx, SpvId result, SpvId dest_type, nir_ SpvId resident = spirv_builder_emit_composite_extract(&ctx->builder, spirv_builder_type_uint(&ctx->builder, 32), result, &idx, 1); idx = 1; /* normal vec4 return */ - if (dest_ssa->num_components == 4) + if (def->num_components == 4) result = spirv_builder_emit_composite_extract(&ctx->builder, dest_type, result, &idx, 1); else { /* shadow */ - assert(dest_ssa->num_components == 1); - SpvId type = spirv_builder_type_float(&ctx->builder, dest_ssa->bit_size); + assert(def->num_components == 1); + SpvId type = spirv_builder_type_float(&ctx->builder, def->bit_size); SpvId val[2]; /* pad to 2 components: the upcoming is_sparse_texels_resident instr will always use the * separate residency value, but the shader still expects this return to be a vec2, * so give it a vec2 */ val[0] = spirv_builder_emit_composite_extract(&ctx->builder, type, result, &idx, 1); - val[1] = emit_float_const(ctx, dest_ssa->bit_size, 0); - result = spirv_builder_emit_composite_construct(&ctx->builder, get_fvec_type(ctx, dest_ssa->bit_size, 2), val, 2); + val[1] = emit_float_const(ctx, def->bit_size, 0); + result = spirv_builder_emit_composite_construct(&ctx->builder, get_fvec_type(ctx, def->bit_size, 2), val, 2); } assert(resident != 0); - assert(dest_ssa->index < ctx->num_defs); - ctx->resident_defs[dest_ssa->index] = resident; + assert(def->index < ctx->num_defs); + ctx->resident_defs[def->index] = resident; return result; } @@ -3315,19 +3315,19 @@ emit_image_deref_load(struct ntv_context *ctx, nir_intrinsic_instr *intr) glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_SUBPASS_MS; SpvId sample = use_sample ? get_src(ctx, &intr->src[2], &atype) : 0; SpvId dest_type = spirv_builder_type_vector(&ctx->builder, base_type, - intr->dest.ssa.num_components); + intr->def.num_components); SpvId result = spirv_builder_emit_image_read(&ctx->builder, dest_type, img, coord, 0, sample, 0, sparse); if (sparse) - result = extract_sparse_load(ctx, result, dest_type, &intr->dest.ssa); + result = extract_sparse_load(ctx, result, dest_type, &intr->def); if (!sparse && mediump) { spirv_builder_emit_decoration(&ctx->builder, result, SpvDecorationRelaxedPrecision); } - store_def(ctx, &intr->dest.ssa, result, nir_get_nir_type_for_glsl_base_type(glsl_get_sampler_result_type(type))); + store_def(ctx, &intr->def, result, nir_get_nir_type_for_glsl_base_type(glsl_get_sampler_result_type(type))); } static void @@ -3347,7 +3347,7 @@ emit_image_deref_size(struct ntv_context *ctx, nir_intrinsic_instr *intr) spirv_builder_emit_cap(&ctx->builder, SpvCapabilityImageQuery); SpvId result = spirv_builder_emit_image_query_size(&ctx->builder, get_uvec_type(ctx, 32, num_components), img, 0); - store_def(ctx, &intr->dest.ssa, result, nir_type_uint); + store_def(ctx, &intr->def, result, nir_type_uint); } static void @@ -3361,8 +3361,8 @@ emit_image_deref_samples(struct ntv_context *ctx, nir_intrinsic_instr *intr) SpvId img = spirv_builder_emit_load(&ctx->builder, img_type, img_var); spirv_builder_emit_cap(&ctx->builder, SpvCapabilityImageQuery); - SpvId result = spirv_builder_emit_unop(&ctx->builder, SpvOpImageQuerySamples, get_def_type(ctx, &intr->dest.ssa, nir_type_uint), img); - store_def(ctx, &intr->dest.ssa, result, nir_type_uint); + SpvId result = spirv_builder_emit_unop(&ctx->builder, SpvOpImageQuerySamples, get_def_type(ctx, &intr->def, nir_type_uint), img); + store_def(ctx, &intr->def, result, nir_type_uint); } static void @@ -3388,14 +3388,14 @@ 
emit_image_intrinsic(struct ntv_context *ctx, nir_intrinsic_instr *intr) */ nir_alu_type ntype = nir_get_nir_type_for_glsl_base_type(glsl_type); if (ptype != ntype) { - SpvId cast_type = get_def_type(ctx, &intr->dest.ssa, ntype); + SpvId cast_type = get_def_type(ctx, &intr->def, ntype); param = emit_bitcast(ctx, cast_type, param); } if (intr->intrinsic == nir_intrinsic_image_deref_atomic_swap) { param2 = get_src(ctx, &intr->src[4], &ptype); if (ptype != ntype) { - SpvId cast_type = get_def_type(ctx, &intr->dest.ssa, ntype); + SpvId cast_type = get_def_type(ctx, &intr->def, ntype); param2 = emit_bitcast(ctx, cast_type, param2); } } @@ -3408,10 +3408,10 @@ emit_ballot(struct ntv_context *ctx, nir_intrinsic_instr *intr) { spirv_builder_emit_cap(&ctx->builder, SpvCapabilitySubgroupBallotKHR); spirv_builder_emit_extension(&ctx->builder, "SPV_KHR_shader_ballot"); - SpvId type = get_def_uvec_type(ctx, &intr->dest.ssa); + SpvId type = get_def_uvec_type(ctx, &intr->def); nir_alu_type atype; SpvId result = emit_unop(ctx, SpvOpSubgroupBallotKHR, type, get_src(ctx, &intr->src[0], &atype)); - store_def(ctx, &intr->dest.ssa, result, nir_type_uint); + store_def(ctx, &intr->def, result, nir_type_uint); } static void @@ -3421,9 +3421,9 @@ emit_read_first_invocation(struct ntv_context *ctx, nir_intrinsic_instr *intr) spirv_builder_emit_extension(&ctx->builder, "SPV_KHR_shader_ballot"); nir_alu_type atype; SpvId src = get_src(ctx, &intr->src[0], &atype); - SpvId type = get_def_type(ctx, &intr->dest.ssa, atype); + SpvId type = get_def_type(ctx, &intr->def, atype); SpvId result = emit_unop(ctx, SpvOpSubgroupFirstInvocationKHR, type, src); - store_def(ctx, &intr->dest.ssa, result, atype); + store_def(ctx, &intr->def, result, atype); } static void @@ -3433,11 +3433,11 @@ emit_read_invocation(struct ntv_context *ctx, nir_intrinsic_instr *intr) spirv_builder_emit_extension(&ctx->builder, "SPV_KHR_shader_ballot"); nir_alu_type atype, itype; SpvId src = get_src(ctx, &intr->src[0], &atype); - SpvId type = get_def_type(ctx, &intr->dest.ssa, atype); + SpvId type = get_def_type(ctx, &intr->def, atype); SpvId result = emit_binop(ctx, SpvOpSubgroupReadInvocationKHR, type, src, get_src(ctx, &intr->src[1], &itype)); - store_def(ctx, &intr->dest.ssa, result, atype); + store_def(ctx, &intr->def, result, atype); } static void @@ -3447,9 +3447,9 @@ emit_shader_clock(struct ntv_context *ctx, nir_intrinsic_instr *intr) spirv_builder_emit_extension(&ctx->builder, "SPV_KHR_shader_clock"); SpvScope scope = get_scope(nir_intrinsic_memory_scope(intr)); - SpvId type = get_def_type(ctx, &intr->dest.ssa, nir_type_uint); + SpvId type = get_def_type(ctx, &intr->def, nir_type_uint); SpvId result = spirv_builder_emit_unop_const(&ctx->builder, SpvOpReadClockKHR, type, scope); - store_def(ctx, &intr->dest.ssa, result, nir_type_uint); + store_def(ctx, &intr->def, result, nir_type_uint); } static void @@ -3457,7 +3457,7 @@ emit_is_sparse_texels_resident(struct ntv_context *ctx, nir_intrinsic_instr *int { spirv_builder_emit_cap(&ctx->builder, SpvCapabilitySparseResidency); - SpvId type = get_def_type(ctx, &intr->dest.ssa, nir_type_uint); + SpvId type = get_def_type(ctx, &intr->def, nir_type_uint); /* this will always be stored with the ssa index of the parent instr */ nir_def *ssa = intr->src[0].ssa; @@ -3469,7 +3469,7 @@ emit_is_sparse_texels_resident(struct ntv_context *ctx, nir_intrinsic_instr *int SpvId resident = ctx->resident_defs[index]; SpvId result = spirv_builder_emit_unop(&ctx->builder, SpvOpImageSparseTexelsResident, type, resident); 
- store_def(ctx, &intr->dest.ssa, result, nir_type_uint); + store_def(ctx, &intr->def, result, nir_type_uint); } static void @@ -3494,7 +3494,7 @@ emit_vote(struct ntv_context *ctx, nir_intrinsic_instr *intr) spirv_builder_emit_cap(&ctx->builder, SpvCapabilityGroupNonUniformVote); nir_alu_type atype; SpvId result = spirv_builder_emit_vote(&ctx->builder, op, get_src(ctx, &intr->src[0], &atype)); - store_def_raw(ctx, &intr->dest.ssa, result, nir_type_bool); + store_def_raw(ctx, &intr->def, result, nir_type_bool); } static void @@ -3503,7 +3503,7 @@ emit_is_helper_invocation(struct ntv_context *ctx, nir_intrinsic_instr *intr) spirv_builder_emit_extension(&ctx->builder, "SPV_EXT_demote_to_helper_invocation"); SpvId result = spirv_is_helper_invocation(&ctx->builder); - store_def(ctx, &intr->dest.ssa, result, nir_type_bool); + store_def(ctx, &intr->def, result, nir_type_bool); } static void @@ -3762,7 +3762,7 @@ emit_intrinsic(struct ntv_context *ctx, nir_intrinsic_instr *intr) case nir_intrinsic_load_workgroup_size: assert(ctx->local_group_size_var); - store_def(ctx, &intr->dest.ssa, ctx->local_group_size_var, nir_type_uint); + store_def(ctx, &intr->def, ctx->local_group_size_var, nir_type_uint); break; case nir_intrinsic_load_shared: @@ -4036,8 +4036,8 @@ emit_tex(struct ntv_context *ctx, nir_tex_instr *tex) } if (tex->is_sparse) - tex->dest.ssa.num_components--; - SpvId dest_type = get_def_type(ctx, &tex->dest.ssa, tex->dest_type); + tex->def.num_components--; + SpvId dest_type = get_def_type(ctx, &tex->def, tex->dest_type); if (nir_tex_instr_is_query(tex)) spirv_builder_emit_cap(&ctx->builder, SpvCapabilityImageQuery); @@ -4065,7 +4065,7 @@ emit_tex(struct ntv_context *ctx, nir_tex_instr *tex) SpvId result = spirv_builder_emit_image_query_size(&ctx->builder, dest_type, image, lod); - store_def(ctx, &tex->dest.ssa, result, tex->dest_type); + store_def(ctx, &tex->def, result, tex->dest_type); return; } if (tex->op == nir_texop_query_levels) { @@ -4074,7 +4074,7 @@ emit_tex(struct ntv_context *ctx, nir_tex_instr *tex) spirv_builder_emit_image(&ctx->builder, image_type, load); SpvId result = spirv_builder_emit_image_query_levels(&ctx->builder, dest_type, image); - store_def(ctx, &tex->dest.ssa, result, tex->dest_type); + store_def(ctx, &tex->def, result, tex->dest_type); return; } if (tex->op == nir_texop_texture_samples) { @@ -4083,7 +4083,7 @@ emit_tex(struct ntv_context *ctx, nir_tex_instr *tex) spirv_builder_emit_image(&ctx->builder, image_type, load); SpvId result = spirv_builder_emit_unop(&ctx->builder, SpvOpImageQuerySamples, dest_type, image); - store_def(ctx, &tex->dest.ssa, result, tex->dest_type); + store_def(ctx, &tex->def, result, tex->dest_type); return; } @@ -4113,11 +4113,11 @@ emit_tex(struct ntv_context *ctx, nir_tex_instr *tex) SpvId result = spirv_builder_emit_image_query_lod(&ctx->builder, dest_type, load, coord); - store_def(ctx, &tex->dest.ssa, result, tex->dest_type); + store_def(ctx, &tex->def, result, tex->dest_type); return; } SpvId actual_dest_type; - unsigned num_components = tex->dest.ssa.num_components; + unsigned num_components = tex->def.num_components; switch (nir_alu_type_get_base_type(tex->dest_type)) { case nir_type_int: actual_dest_type = get_ivec_type(ctx, 32, num_components); @@ -4179,18 +4179,18 @@ emit_tex(struct ntv_context *ctx, nir_tex_instr *tex) } if (tex->is_sparse) - result = extract_sparse_load(ctx, result, actual_dest_type, &tex->dest.ssa); + result = extract_sparse_load(ctx, result, actual_dest_type, &tex->def); - if (tex->dest.ssa.bit_size != 
32) { + if (tex->def.bit_size != 32) { /* convert FP32 to FP16 */ result = emit_unop(ctx, SpvOpFConvert, dest_type, result); } if (tex->is_sparse && tex->is_shadow) - tex->dest.ssa.num_components++; - store_def(ctx, &tex->dest.ssa, result, tex->dest_type); + tex->def.num_components++; + store_def(ctx, &tex->def, result, tex->dest_type); if (tex->is_sparse && !tex->is_shadow) - tex->dest.ssa.num_components++; + tex->def.num_components++; } static void @@ -4250,7 +4250,7 @@ emit_deref_var(struct ntv_context *ctx, nir_deref_instr *deref) struct hash_entry *he = _mesa_hash_table_search(ctx->vars, deref->var); assert(he); SpvId result = (SpvId)(intptr_t)he->data; - store_def_raw(ctx, &deref->dest.ssa, result, get_nir_alu_type(deref->type)); + store_def_raw(ctx, &deref->def, result, get_nir_alu_type(deref->type)); } static void @@ -4334,7 +4334,7 @@ emit_deref_array(struct ntv_context *ctx, nir_deref_instr *deref) base, &index, 1); /* uint is a bit of a lie here, it's really just an opaque type */ - store_def(ctx, &deref->dest.ssa, result, get_nir_alu_type(deref->type)); + store_def(ctx, &deref->def, result, get_nir_alu_type(deref->type)); } static void @@ -4360,7 +4360,7 @@ emit_deref_struct(struct ntv_context *ctx, nir_deref_instr *deref) get_src(ctx, &deref->parent, &atype), &index, 1); /* uint is a bit of a lie here, it's really just an opaque type */ - store_def(ctx, &deref->dest.ssa, result, get_nir_alu_type(deref->type)); + store_def(ctx, &deref->def, result, get_nir_alu_type(deref->type)); } static void diff --git a/src/gallium/drivers/zink/zink_compiler.c b/src/gallium/drivers/zink/zink_compiler.c index 9cc119b..31a6247 100644 --- a/src/gallium/drivers/zink/zink_compiler.c +++ b/src/gallium/drivers/zink/zink_compiler.c @@ -142,7 +142,7 @@ lower_64bit_vertex_attribs_instr(nir_builder *b, nir_instr *instr, void *data) def[3] = nir_vector_extract(b, load2, nir_imm_int(b, 1)); nir_def *new_vec = nir_vec(b, def, total_num_components); /* use the assembled dvec3/4 for all other uses of the load */ - nir_def_rewrite_uses_after(&intr->dest.ssa, new_vec, + nir_def_rewrite_uses_after(&intr->def, new_vec, new_vec->parent_instr); /* remove the original instr and its deref chain */ @@ -194,7 +194,7 @@ lower_64bit_uint_attribs_instr(nir_builder *b, nir_instr *instr, void *data) nir_def *casted[2]; for (unsigned i = 0; i < num_components; i++) casted[i] = nir_pack_64_2x32(b, nir_channels(b, load, BITFIELD_RANGE(i * 2, 2))); - nir_def_rewrite_uses(&intr->dest.ssa, nir_vec(b, casted, num_components)); + nir_def_rewrite_uses(&intr->def, nir_vec(b, casted, num_components)); /* remove the original instr and its deref chain */ nir_instr *parent = intr->src[0].ssa->parent_instr; @@ -235,16 +235,16 @@ lower_basevertex_instr(nir_builder *b, nir_instr *in, void *data) nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_push_constant_zink); load->src[0] = nir_src_for_ssa(nir_imm_int(b, ZINK_GFX_PUSHCONST_DRAW_MODE_IS_INDEXED)); load->num_components = 1; - nir_def_init(&load->instr, &load->dest.ssa, 1, 32); + nir_def_init(&load->instr, &load->def, 1, 32); nir_builder_instr_insert(b, &load->instr); nir_def *composite = nir_build_alu(b, nir_op_bcsel, - nir_build_alu(b, nir_op_ieq, &load->dest.ssa, nir_imm_int(b, 1), NULL, NULL), - &instr->dest.ssa, + nir_build_alu(b, nir_op_ieq, &load->def, nir_imm_int(b, 1), NULL, NULL), + &instr->def, nir_imm_int(b, 0), NULL); - nir_def_rewrite_uses_after(&instr->dest.ssa, composite, + nir_def_rewrite_uses_after(&instr->def, composite, 
composite->parent_instr); return true; } @@ -275,10 +275,10 @@ lower_drawid_instr(nir_builder *b, nir_instr *in, void *data) nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_push_constant_zink); load->src[0] = nir_src_for_ssa(nir_imm_int(b, ZINK_GFX_PUSHCONST_DRAW_ID)); load->num_components = 1; - nir_def_init(&load->instr, &load->dest.ssa, 1, 32); + nir_def_init(&load->instr, &load->def, 1, 32); nir_builder_instr_insert(b, &load->instr); - nir_def_rewrite_uses(&instr->dest.ssa, &load->dest.ssa); + nir_def_rewrite_uses(&instr->def, &load->def); return true; } @@ -840,7 +840,7 @@ lower_line_stipple_fs(nir_shader *shader) nir_def *stipple_pos = nir_interp_deref_at_sample(&b, 1, 32, - &nir_build_deref_var(&b, stipple)->dest.ssa, index); + &nir_build_deref_var(&b, stipple)->def, index); stipple_pos = nir_fmod(&b, nir_fdiv(&b, stipple_pos, factor), nir_imm_float(&b, 16.0)); stipple_pos = nir_f2i32(&b, stipple_pos); @@ -1313,7 +1313,7 @@ lower_system_values_to_inlined_uniforms_instr(nir_builder *b, nir_instr *instr, nir_imm_int(b, inlined_uniform_offset), .align_mul = 4, .align_offset = 0, .range_base = 0, .range = ~0); - nir_def_rewrite_uses(&intrin->dest.ssa, new_dest_def); + nir_def_rewrite_uses(&intrin->def, new_dest_def); nir_instr_remove(instr); return true; } @@ -1529,19 +1529,19 @@ bound_bo_access_instr(nir_builder *b, nir_instr *instr, void *data) switch (intr->intrinsic) { case nir_intrinsic_store_ssbo: - var = bo->ssbo[intr->dest.ssa.bit_size >> 4]; + var = bo->ssbo[intr->def.bit_size >> 4]; offset = intr->src[2].ssa; is_load = false; break; case nir_intrinsic_load_ssbo: - var = bo->ssbo[intr->dest.ssa.bit_size >> 4]; + var = bo->ssbo[intr->def.bit_size >> 4]; offset = intr->src[1].ssa; break; case nir_intrinsic_load_ubo: if (nir_src_is_const(intr->src[0]) && nir_src_as_const_value(intr->src[0])->u32 == 0) - var = bo->uniforms[intr->dest.ssa.bit_size >> 4]; + var = bo->uniforms[intr->def.bit_size >> 4]; else - var = bo->ubo[intr->dest.ssa.bit_size >> 4]; + var = bo->ubo[intr->def.bit_size >> 4]; offset = intr->src[1].ssa; break; default: @@ -1564,13 +1564,13 @@ bound_bo_access_instr(nir_builder *b, nir_instr *instr, void *data) if (offset_bytes + i >= size) { rewrites++; if (is_load) - result[i] = nir_imm_zero(b, 1, intr->dest.ssa.bit_size); + result[i] = nir_imm_zero(b, 1, intr->def.bit_size); } } assert(rewrites == intr->num_components); if (is_load) { nir_def *load = nir_vec(b, result, intr->num_components); - nir_def_rewrite_uses(&intr->dest.ssa, load); + nir_def_rewrite_uses(&intr->def, load); } nir_instr_remove(instr); return true; @@ -1657,10 +1657,10 @@ lower_fbfetch_instr(nir_builder *b, nir_instr *instr, void *data) enum glsl_sampler_dim dim = ms ? GLSL_SAMPLER_DIM_SUBPASS_MS : GLSL_SAMPLER_DIM_SUBPASS; fbfetch->type = glsl_image_type(dim, false, GLSL_TYPE_FLOAT); nir_shader_add_variable(b->shader, fbfetch); - nir_def *deref = &nir_build_deref_var(b, fbfetch)->dest.ssa; + nir_def *deref = &nir_build_deref_var(b, fbfetch)->def; nir_def *sample = ms ? 
nir_load_sample_id(b) : nir_undef(b, 1, 32); nir_def *load = nir_image_deref_load(b, 4, 32, deref, nir_imm_vec4(b, 0, 0, 0, 1), sample, nir_imm_int(b, 0)); - nir_def_rewrite_uses(&intr->dest.ssa, load); + nir_def_rewrite_uses(&intr->def, load); return true; } @@ -1720,11 +1720,11 @@ lower_txf_lod_robustness_instr(nir_builder *b, nir_instr *in, void *data) levels->src[!!(offset_idx >= 0)].src_type = nir_tex_src_texture_handle; nir_src_copy(&levels->src[!!(offset_idx >= 0)].src, &txf->src[handle_idx].src, &levels->instr); } - nir_def_init(&levels->instr, &levels->dest.ssa, + nir_def_init(&levels->instr, &levels->def, nir_tex_instr_dest_size(levels), 32); nir_builder_instr_insert(b, &levels->instr); - nir_if *lod_oob_if = nir_push_if(b, nir_ilt(b, lod, &levels->dest.ssa)); + nir_if *lod_oob_if = nir_push_if(b, nir_ilt(b, lod, &levels->def)); nir_tex_instr *new_txf = nir_instr_as_tex(nir_instr_clone(b->shader, in)); nir_builder_instr_insert(b, &new_txf->instr); @@ -1736,9 +1736,9 @@ lower_txf_lod_robustness_instr(nir_builder *b, nir_instr *in, void *data) nir_def *oob_val = nir_build_imm(b, nir_tex_instr_dest_size(txf), bit_size, oob_values); nir_pop_if(b, lod_oob_else); - nir_def *robust_txf = nir_if_phi(b, &new_txf->dest.ssa, oob_val); + nir_def *robust_txf = nir_if_phi(b, &new_txf->def, oob_val); - nir_def_rewrite_uses(&txf->dest.ssa, robust_txf); + nir_def_rewrite_uses(&txf->def, robust_txf); nir_instr_remove_v(in); return true; } @@ -2088,7 +2088,7 @@ lower_attrib(nir_builder *b, nir_instr *instr, void *data) loads[0] = nir_channel(b, loads[0], 0); } nir_def *new_load = nir_vec(b, loads, num_components); - nir_def_rewrite_uses(&intr->dest.ssa, new_load); + nir_def_rewrite_uses(&intr->def, new_load); nir_instr_remove_v(instr); return true; } @@ -2144,7 +2144,7 @@ rewrite_bo_access_instr(nir_builder *b, nir_instr *instr, void *data) case nir_intrinsic_ssbo_atomic: case nir_intrinsic_ssbo_atomic_swap: { /* convert offset to uintN_t[idx] */ - nir_def *offset = nir_udiv_imm(b, intr->src[1].ssa, intr->dest.ssa.bit_size / 8); + nir_def *offset = nir_udiv_imm(b, intr->src[1].ssa, intr->def.bit_size / 8); nir_instr_rewrite_src_ssa(instr, &intr->src[1], offset); return true; } @@ -2154,15 +2154,15 @@ rewrite_bo_access_instr(nir_builder *b, nir_instr *instr, void *data) bool force_2x32 = intr->intrinsic == nir_intrinsic_load_ubo && nir_src_is_const(intr->src[0]) && nir_src_as_uint(intr->src[0]) == 0 && - intr->dest.ssa.bit_size == 64 && + intr->def.bit_size == 64 && nir_intrinsic_align_offset(intr) % 8 != 0; - force_2x32 |= intr->dest.ssa.bit_size == 64 && !has_int64; - nir_def *offset = nir_udiv_imm(b, intr->src[1].ssa, (force_2x32 ? 32 : intr->dest.ssa.bit_size) / 8); + force_2x32 |= intr->def.bit_size == 64 && !has_int64; + nir_def *offset = nir_udiv_imm(b, intr->src[1].ssa, (force_2x32 ? 
32 : intr->def.bit_size) / 8); nir_instr_rewrite_src_ssa(instr, &intr->src[1], offset); /* if 64bit isn't supported, 64bit loads definitely aren't supported, so rewrite as 2x32 with cast and pray */ if (force_2x32) { /* this is always scalarized */ - assert(intr->dest.ssa.num_components == 1); + assert(intr->def.num_components == 1); /* rewrite as 2x32 */ nir_def *load[2]; for (unsigned i = 0; i < 2; i++) { @@ -2174,27 +2174,27 @@ rewrite_bo_access_instr(nir_builder *b, nir_instr *instr, void *data) } /* cast back to 64bit */ nir_def *casted = nir_pack_64_2x32_split(b, load[0], load[1]); - nir_def_rewrite_uses(&intr->dest.ssa, casted); + nir_def_rewrite_uses(&intr->def, casted); nir_instr_remove(instr); } return true; } case nir_intrinsic_load_shared: b->cursor = nir_before_instr(instr); - bool force_2x32 = intr->dest.ssa.bit_size == 64 && !has_int64; - nir_def *offset = nir_udiv_imm(b, intr->src[0].ssa, (force_2x32 ? 32 : intr->dest.ssa.bit_size) / 8); + bool force_2x32 = intr->def.bit_size == 64 && !has_int64; + nir_def *offset = nir_udiv_imm(b, intr->src[0].ssa, (force_2x32 ? 32 : intr->def.bit_size) / 8); nir_instr_rewrite_src_ssa(instr, &intr->src[0], offset); /* if 64bit isn't supported, 64bit loads definitely aren't supported, so rewrite as 2x32 with cast and pray */ if (force_2x32) { /* this is always scalarized */ - assert(intr->dest.ssa.num_components == 1); + assert(intr->def.num_components == 1); /* rewrite as 2x32 */ nir_def *load[2]; for (unsigned i = 0; i < 2; i++) load[i] = nir_load_shared(b, 1, 32, nir_iadd_imm(b, intr->src[0].ssa, i), .align_mul = 4, .align_offset = 0); /* cast back to 64bit */ nir_def *casted = nir_pack_64_2x32_split(b, load[0], load[1]); - nir_def_rewrite_uses(&intr->dest.ssa, casted); + nir_def_rewrite_uses(&intr->def, casted); nir_instr_remove(instr); return true; } @@ -2312,7 +2312,7 @@ rewrite_atomic_ssbo_instr(nir_builder *b, nir_instr *instr, struct bo_vars *bo) nir_def *offset = intr->src[1].ssa; nir_src *src = &intr->src[0]; nir_variable *var = get_bo_var(b->shader, bo, true, src, - intr->dest.ssa.bit_size); + intr->def.bit_size); nir_deref_instr *deref_var = nir_build_deref_var(b, var); nir_def *idx = src->ssa; if (bo->first_ssbo) @@ -2322,25 +2322,25 @@ rewrite_atomic_ssbo_instr(nir_builder *b, nir_instr *instr, struct bo_vars *bo) /* generate new atomic deref ops for every component */ nir_def *result[4]; - unsigned num_components = intr->dest.ssa.num_components; + unsigned num_components = intr->def.num_components; for (unsigned i = 0; i < num_components; i++) { nir_deref_instr *deref_arr = nir_build_deref_array(b, deref_struct, offset); nir_intrinsic_instr *new_instr = nir_intrinsic_instr_create(b->shader, op); - nir_def_init(&new_instr->instr, &new_instr->dest.ssa, 1, - intr->dest.ssa.bit_size); + nir_def_init(&new_instr->instr, &new_instr->def, 1, + intr->def.bit_size); nir_intrinsic_set_atomic_op(new_instr, nir_intrinsic_atomic_op(intr)); - new_instr->src[0] = nir_src_for_ssa(&deref_arr->dest.ssa); + new_instr->src[0] = nir_src_for_ssa(&deref_arr->def); /* deref ops have no offset src, so copy the srcs after it */ for (unsigned i = 2; i < nir_intrinsic_infos[intr->intrinsic].num_srcs; i++) nir_src_copy(&new_instr->src[i - 1], &intr->src[i], &new_instr->instr); nir_builder_instr_insert(b, &new_instr->instr); - result[i] = &new_instr->dest.ssa; + result[i] = &new_instr->def; offset = nir_iadd_imm(b, offset, 1); } nir_def *load = nir_vec(b, result, num_components); - nir_def_rewrite_uses(&intr->dest.ssa, load); + 
nir_def_rewrite_uses(&intr->def, load); nir_instr_remove(instr); } @@ -2370,12 +2370,12 @@ remove_bo_access_instr(nir_builder *b, nir_instr *instr, void *data) break; case nir_intrinsic_load_ssbo: src = &intr->src[0]; - var = get_bo_var(b->shader, bo, true, src, intr->dest.ssa.bit_size); + var = get_bo_var(b->shader, bo, true, src, intr->def.bit_size); offset = intr->src[1].ssa; break; case nir_intrinsic_load_ubo: src = &intr->src[0]; - var = get_bo_var(b->shader, bo, false, src, intr->dest.ssa.bit_size); + var = get_bo_var(b->shader, bo, false, src, intr->def.bit_size); offset = intr->src[1].ssa; ssbo = false; break; @@ -2391,25 +2391,25 @@ remove_bo_access_instr(nir_builder *b, nir_instr *instr, void *data) else if (ssbo && bo->first_ssbo) idx = nir_iadd_imm(b, idx, -bo->first_ssbo); nir_deref_instr *deref_array = nir_build_deref_array(b, deref_var, - nir_i2iN(b, idx, deref_var->dest.ssa.bit_size)); + nir_i2iN(b, idx, deref_var->def.bit_size)); nir_deref_instr *deref_struct = nir_build_deref_struct(b, deref_array, 0); assert(intr->num_components <= 2); if (is_load) { nir_def *result[2]; for (unsigned i = 0; i < intr->num_components; i++) { nir_deref_instr *deref_arr = nir_build_deref_array(b, deref_struct, - nir_i2iN(b, offset, deref_struct->dest.ssa.bit_size)); + nir_i2iN(b, offset, deref_struct->def.bit_size)); result[i] = nir_load_deref(b, deref_arr); if (intr->intrinsic == nir_intrinsic_load_ssbo) nir_intrinsic_set_access(nir_instr_as_intrinsic(result[i]->parent_instr), nir_intrinsic_access(intr)); offset = nir_iadd_imm(b, offset, 1); } nir_def *load = nir_vec(b, result, intr->num_components); - nir_def_rewrite_uses(&intr->dest.ssa, load); + nir_def_rewrite_uses(&intr->def, load); } else { nir_deref_instr *deref_arr = nir_build_deref_array(b, deref_struct, - nir_i2iN(b, offset, deref_struct->dest.ssa.bit_size)); - nir_build_store_deref(b, &deref_arr->dest.ssa, intr->src[0].ssa, BITFIELD_MASK(intr->num_components), nir_intrinsic_access(intr)); + nir_i2iN(b, offset, deref_struct->def.bit_size)); + nir_build_store_deref(b, &deref_arr->def, intr->src[0].ssa, BITFIELD_MASK(intr->num_components), nir_intrinsic_access(intr)); } nir_instr_remove(instr); return true; @@ -2642,8 +2642,8 @@ rewrite_read_as_0(nir_builder *b, nir_instr *instr, void *data) if (deref_var != var) return false; b->cursor = nir_before_instr(instr); - nir_def *zero = nir_imm_zero(b, intr->dest.ssa.num_components, - intr->dest.ssa.bit_size); + nir_def *zero = nir_imm_zero(b, intr->def.num_components, + intr->def.bit_size); if (b->shader->info.stage == MESA_SHADER_FRAGMENT) { switch (var->data.location) { case VARYING_SLOT_COL0: @@ -2651,14 +2651,14 @@ rewrite_read_as_0(nir_builder *b, nir_instr *instr, void *data) case VARYING_SLOT_BFC0: case VARYING_SLOT_BFC1: /* default color is 0,0,0,1 */ - if (intr->dest.ssa.num_components == 4) + if (intr->def.num_components == 4) zero = nir_vector_insert_imm(b, zero, nir_imm_float(b, 1.0), 3); break; default: break; } } - nir_def_rewrite_uses(&intr->dest.ssa, zero); + nir_def_rewrite_uses(&intr->def, zero); nir_instr_remove(instr); return true; } @@ -2839,7 +2839,7 @@ lower_64bit_vars_function(nir_shader *shader, nir_function_impl *impl, nir_varia if (nir_intrinsic_get_var(intr, 0) != var) break; if ((intr->intrinsic == nir_intrinsic_store_deref && intr->src[1].ssa->bit_size != 64) || - (intr->intrinsic == nir_intrinsic_load_deref && intr->dest.ssa.bit_size != 64)) + (intr->intrinsic == nir_intrinsic_load_deref && intr->def.bit_size != 64)) break; b.cursor = 
nir_before_instr(instr); nir_deref_instr *deref = nir_src_as_deref(intr->src[0]); @@ -2994,7 +2994,7 @@ lower_64bit_vars_function(nir_shader *shader, nir_function_impl *impl, nir_varia } dest = nir_vec(&b, comp, intr->num_components); } - nir_def_rewrite_uses_after(&intr->dest.ssa, dest, instr); + nir_def_rewrite_uses_after(&intr->def, dest, instr); } _mesa_set_add(deletes, instr); break; @@ -3098,8 +3098,8 @@ split_blocks(nir_shader *nir) deref->modes = nir_var_shader_temp; parent->modes = nir_var_shader_temp; b.cursor = nir_before_instr(instr); - nir_def *dest = &nir_build_deref_var(&b, members[deref->strct.index])->dest.ssa; - nir_def_rewrite_uses_after(&deref->dest.ssa, dest, &deref->instr); + nir_def *dest = &nir_build_deref_var(&b, members[deref->strct.index])->def; + nir_def_rewrite_uses_after(&deref->def, dest, &deref->instr); nir_instr_remove(&deref->instr); func_progress = true; break; @@ -3312,13 +3312,13 @@ rewrite_tex_dest(nir_builder *b, nir_tex_instr *tex, nir_variable *var, struct z enum glsl_base_type ret_type = glsl_get_sampler_result_type(type); bool is_int = glsl_base_type_is_integer(ret_type); unsigned bit_size = glsl_base_type_get_bit_size(ret_type); - unsigned dest_size = tex->dest.ssa.bit_size; + unsigned dest_size = tex->def.bit_size; b->cursor = nir_after_instr(&tex->instr); - unsigned num_components = tex->dest.ssa.num_components; + unsigned num_components = tex->def.num_components; bool rewrite_depth = tex->is_shadow && num_components > 1 && tex->op != nir_texop_tg4 && !tex->is_sparse; if (bit_size == dest_size && !rewrite_depth) return NULL; - nir_def *dest = &tex->dest.ssa; + nir_def *dest = &tex->def; if (rewrite_depth && zs) { /* If only .x is used in the NIR, then it's effectively not a legacy depth * sample anyway and we don't want to ask for shader recompiles. 
This is @@ -3334,20 +3334,20 @@ rewrite_tex_dest(nir_builder *b, nir_tex_instr *tex, nir_variable *var, struct z return NULL; } if (bit_size != dest_size) { - tex->dest.ssa.bit_size = bit_size; + tex->def.bit_size = bit_size; tex->dest_type = nir_get_nir_type_for_glsl_base_type(ret_type); if (is_int) { if (glsl_unsigned_base_type_of(ret_type) == ret_type) - dest = nir_u2uN(b, &tex->dest.ssa, dest_size); + dest = nir_u2uN(b, &tex->def, dest_size); else - dest = nir_i2iN(b, &tex->dest.ssa, dest_size); + dest = nir_i2iN(b, &tex->def, dest_size); } else { - dest = nir_f2fN(b, &tex->dest.ssa, dest_size); + dest = nir_f2fN(b, &tex->def, dest_size); } if (rewrite_depth) return dest; - nir_def_rewrite_uses_after(&tex->dest.ssa, dest, dest->parent_instr); + nir_def_rewrite_uses_after(&tex->def, dest, dest->parent_instr); } else if (rewrite_depth) { return dest; } @@ -3395,7 +3395,7 @@ lower_zs_swizzle_tex_instr(nir_builder *b, nir_instr *instr, void *data) const struct glsl_type *type = glsl_without_array(var->type); enum glsl_base_type ret_type = glsl_get_sampler_result_type(type); bool is_int = glsl_base_type_is_integer(ret_type); - unsigned num_components = tex->dest.ssa.num_components; + unsigned num_components = tex->def.num_components; if (tex->is_shadow) tex->is_new_style_shadow = true; nir_def *dest = rewrite_tex_dest(b, tex, var, NULL); @@ -3403,9 +3403,9 @@ lower_zs_swizzle_tex_instr(nir_builder *b, nir_instr *instr, void *data) if (!dest && !(swizzle_key->mask & BITFIELD_BIT(sampler_id))) return false; else if (!dest) - dest = &tex->dest.ssa; + dest = &tex->def; else - tex->dest.ssa.num_components = 1; + tex->def.num_components = 1; if (swizzle_key && (swizzle_key->mask & BITFIELD_BIT(sampler_id))) { /* these require manual swizzles */ if (tex->op == nir_texop_tg4) { @@ -3413,13 +3413,13 @@ lower_zs_swizzle_tex_instr(nir_builder *b, nir_instr *instr, void *data) nir_def *swizzle; switch (swizzle_key->swizzle[sampler_id].s[tex->component]) { case PIPE_SWIZZLE_0: - swizzle = nir_imm_zero(b, 4, tex->dest.ssa.bit_size); + swizzle = nir_imm_zero(b, 4, tex->def.bit_size); break; case PIPE_SWIZZLE_1: if (is_int) - swizzle = nir_imm_intN_t(b, 4, tex->dest.ssa.bit_size); + swizzle = nir_imm_intN_t(b, 4, tex->def.bit_size); else - swizzle = nir_imm_floatN_t(b, 4, tex->dest.ssa.bit_size); + swizzle = nir_imm_floatN_t(b, 4, tex->def.bit_size); break; default: if (!tex->component) @@ -3434,13 +3434,13 @@ lower_zs_swizzle_tex_instr(nir_builder *b, nir_instr *instr, void *data) for (unsigned i = 0; i < ARRAY_SIZE(vec); i++) { switch (swizzle_key->swizzle[sampler_id].s[i]) { case PIPE_SWIZZLE_0: - vec[i] = nir_imm_zero(b, 1, tex->dest.ssa.bit_size); + vec[i] = nir_imm_zero(b, 1, tex->def.bit_size); break; case PIPE_SWIZZLE_1: if (is_int) - vec[i] = nir_imm_intN_t(b, 1, tex->dest.ssa.bit_size); + vec[i] = nir_imm_intN_t(b, 1, tex->def.bit_size); else - vec[i] = nir_imm_floatN_t(b, 1, tex->dest.ssa.bit_size); + vec[i] = nir_imm_floatN_t(b, 1, tex->def.bit_size); break; default: vec[i] = dest->num_components == 1 ? 
             dest : nir_channel(b, dest, i);
@@ -3487,9 +3487,9 @@ invert_point_coord_instr(nir_builder *b, nir_instr *instr, void *data)
    if (intr->intrinsic != nir_intrinsic_load_point_coord)
       return false;
    b->cursor = nir_after_instr(instr);
-   nir_def *def = nir_vec2(b, nir_channel(b, &intr->dest.ssa, 0),
-                           nir_fsub_imm(b, 1.0, nir_channel(b, &intr->dest.ssa, 1)));
-   nir_def_rewrite_uses_after(&intr->dest.ssa, def, def->parent_instr);
+   nir_def *def = nir_vec2(b, nir_channel(b, &intr->def, 0),
+                           nir_fsub_imm(b, 1.0, nir_channel(b, &intr->def, 1)));
+   nir_def_rewrite_uses_after(&intr->def, def, def->parent_instr);
    return true;
 }

@@ -3813,8 +3813,8 @@ lower_baseinstance_instr(nir_builder *b, nir_instr *instr, void *data)
    if (intr->intrinsic != nir_intrinsic_load_instance_id)
       return false;
    b->cursor = nir_after_instr(instr);
-   nir_def *def = nir_isub(b, &intr->dest.ssa, nir_load_base_instance(b));
-   nir_def_rewrite_uses_after(&intr->dest.ssa, def, def->parent_instr);
+   nir_def *def = nir_isub(b, &intr->def, nir_load_base_instance(b));
+   nir_def_rewrite_uses_after(&intr->def, def, def->parent_instr);
    return true;
 }

@@ -4047,7 +4047,7 @@ lower_bindless_instr(nir_builder *b, nir_instr *in, void *data)
    nir_deref_instr *deref = nir_build_deref_var(b, var);
    if (glsl_type_is_array(var->type))
       deref = nir_build_deref_array(b, deref, nir_u2uN(b, tex->src[idx].src.ssa, 32));
-   nir_instr_rewrite_src_ssa(in, &tex->src[idx].src, &deref->dest.ssa);
+   nir_instr_rewrite_src_ssa(in, &tex->src[idx].src, &deref->def);

    /* bindless sampling uses the variable type directly, which means the tex instr has to exactly
     * match up with it in contrast to normal sampler ops where things are a bit more flexible;
@@ -4101,7 +4101,7 @@ lower_bindless_instr(nir_builder *b, nir_instr *in, void *data)
    nir_deref_instr *deref = nir_build_deref_var(b, var);
    if (glsl_type_is_array(var->type))
       deref = nir_build_deref_array(b, deref, nir_u2uN(b, instr->src[0].ssa, 32));
-   nir_instr_rewrite_src_ssa(in, &instr->src[0], &deref->dest.ssa);
+   nir_instr_rewrite_src_ssa(in, &instr->src[0], &deref->def);
    return true;
 }

@@ -4143,7 +4143,7 @@ lower_bindless_io_instr(nir_builder *b, nir_instr *in, void *data)
    if (instr->intrinsic == nir_intrinsic_load_deref) {
       nir_def *def = nir_load_deref(b, deref);
       nir_instr_rewrite_src_ssa(in, &instr->src[0], def);
-      nir_def_rewrite_uses(&instr->dest.ssa, def);
+      nir_def_rewrite_uses(&instr->def, def);
    } else {
       nir_store_deref(b, deref, instr->src[1].ssa, nir_intrinsic_write_mask(instr));
    }
@@ -4293,14 +4293,14 @@ convert_1d_shadow_tex(nir_builder *b, nir_instr *instr, void *data)
    }
    b->cursor = nir_after_instr(instr);
    unsigned needed_components = nir_tex_instr_dest_size(tex);
-   unsigned num_components = tex->dest.ssa.num_components;
+   unsigned num_components = tex->def.num_components;
    if (needed_components > num_components) {
-      tex->dest.ssa.num_components = needed_components;
+      tex->def.num_components = needed_components;
       assert(num_components < 3);
       /* take either xz or just x since this is promoted to 2D from 1D */
       uint32_t mask = num_components == 2 ? (1|4) : 1;
-      nir_def *dst = nir_channels(b, &tex->dest.ssa, mask);
-      nir_def_rewrite_uses_after(&tex->dest.ssa, dst, dst->parent_instr);
+      nir_def *dst = nir_channels(b, &tex->def, mask);
+      nir_def_rewrite_uses_after(&tex->def, dst, dst->parent_instr);
    }
    return true;
 }

@@ -4415,7 +4415,7 @@ lower_sparse_instr(nir_builder *b, nir_instr *in, void *data)
    else
       src1 = instr->src[1].ssa;
    nir_def *def = nir_iand(b, src0, src1);
-   nir_def_rewrite_uses_after(&instr->dest.ssa, def, in);
+   nir_def_rewrite_uses_after(&instr->def, def, in);
    nir_instr_remove(in);
    return true;
 }
@@ -4444,13 +4444,13 @@ lower_sparse_instr(nir_builder *b, nir_instr *in, void *data)
       nir_alu_instr *alu = nir_instr_as_alu(parent);
       src = alu->src[0].src.ssa;
    }
-   if (instr->dest.ssa.bit_size != 32) {
-      if (instr->dest.ssa.bit_size == 1)
+   if (instr->def.bit_size != 32) {
+      if (instr->def.bit_size == 1)
         src = nir_ieq_imm(b, src, 1);
      else
-        src = nir_u2uN(b, src, instr->dest.ssa.bit_size);
+        src = nir_u2uN(b, src, instr->def.bit_size);
    }
-   nir_def_rewrite_uses(&instr->dest.ssa, src);
+   nir_def_rewrite_uses(&instr->def, src);
    nir_instr_remove(in);
 }
 return true;
diff --git a/src/gallium/drivers/zink/zink_lower_cubemap_to_array.c b/src/gallium/drivers/zink/zink_lower_cubemap_to_array.c
index f9f946f..13716bc 100644
--- a/src/gallium/drivers/zink/zink_lower_cubemap_to_array.c
+++ b/src/gallium/drivers/zink/zink_lower_cubemap_to_array.c
@@ -176,11 +176,11 @@ create_array_tex_from_cube_tex(nir_builder *b, nir_tex_instr *tex, nir_def *coor
       s++;
    }

-   nir_def_init(&array_tex->instr, &array_tex->dest.ssa,
+   nir_def_init(&array_tex->instr, &array_tex->def,
                 nir_tex_instr_dest_size(array_tex),
-                tex->dest.ssa.bit_size);
+                tex->def.bit_size);
    nir_builder_instr_insert(b, &array_tex->instr);
-   return &array_tex->dest.ssa;
+   return &array_tex->def;
 }

 static nir_def *
@@ -446,11 +446,11 @@ lower_tex_to_txl(nir_builder *b, nir_tex_instr *tex)
    txl->src[s] = nir_tex_src_for_ssa(nir_tex_src_lod, lod);

    b->cursor = nir_before_instr(&tex->instr);
-   nir_def_init(&txl->instr, &txl->dest.ssa,
-                tex->dest.ssa.num_components,
-                tex->dest.ssa.bit_size);
+   nir_def_init(&txl->instr, &txl->def,
+                tex->def.num_components,
+                tex->def.bit_size);
    nir_builder_instr_insert(b, &txl->instr);
-   nir_def_rewrite_uses(&tex->dest.ssa, &txl->dest.ssa);
+   nir_def_rewrite_uses(&tex->def, &txl->def);
    return txl;
 }

@@ -482,14 +482,14 @@ lower_cube_txs(nir_builder *b, nir_tex_instr *tex)
    b->cursor = nir_after_instr(&tex->instr);
    rewrite_cube_var_type(b, tex);
-   unsigned num_components = tex->dest.ssa.num_components;
+   unsigned num_components = tex->def.num_components;
    /* force max components to unbreak textureSize().xy */
-   tex->dest.ssa.num_components = 3;
+   tex->def.num_components = 3;
    tex->is_array = true;
-   nir_def *array_dim = nir_channel(b, &tex->dest.ssa, 2);
+   nir_def *array_dim = nir_channel(b, &tex->def, 2);
    nir_def *cube_array_dim = nir_idiv(b, array_dim, nir_imm_int(b, 6));
-   nir_def *size = nir_vec3(b, nir_channel(b, &tex->dest.ssa, 0),
-                            nir_channel(b, &tex->dest.ssa, 1),
+   nir_def *size = nir_vec3(b, nir_channel(b, &tex->def, 0),
+                            nir_channel(b, &tex->def, 1),
                             cube_array_dim);
    return nir_trim_vector(b, size, num_components);
 }
diff --git a/src/gallium/frontends/clover/nir/invocation.cpp b/src/gallium/frontends/clover/nir/invocation.cpp
index 5cbda58..be98a49 100644
--- a/src/gallium/frontends/clover/nir/invocation.cpp
+++ b/src/gallium/frontends/clover/nir/invocation.cpp
@@ -165,7 +165,7 @@ clover_lower_nir_instr(nir_builder *b, nir_instr *instr, void *_state)
       }

       return nir_u2uN(b, nir_vec(b, loads, state->global_dims),
-                      intrinsic->dest.ssa.bit_size);
+                      intrinsic->def.bit_size);
    }
    case nir_intrinsic_load_constant_base_ptr: {
       return nir_load_var(b, state->constant_var);
diff --git a/src/gallium/frontends/lavapipe/lvp_inline_uniforms.c b/src/gallium/frontends/lavapipe/lvp_inline_uniforms.c
index 44d57ad..fb876e1 100644
--- a/src/gallium/frontends/lavapipe/lvp_inline_uniforms.c
+++ b/src/gallium/frontends/lavapipe/lvp_inline_uniforms.c
@@ -192,8 +192,8 @@ lvp_inline_uniforms(nir_shader *nir, const struct lvp_shader *shader, const uint
                nir_src_as_uint(intr->src[0]) == ubo &&
                nir_src_is_const(intr->src[1]) &&
                /* TODO: Can't handle other bit sizes for now. */
-               intr->dest.ssa.bit_size == 32) {
-               int num_components = intr->dest.ssa.num_components;
+               intr->def.bit_size == 32) {
+               int num_components = intr->def.num_components;
                uint32_t offset = nir_src_as_uint(intr->src[1]);
                const unsigned num_uniforms = shader->inlines.count[ubo];
                const unsigned *uniform_dw_offsets = shader->inlines.uniform_offsets[ubo];
@@ -204,7 +204,7 @@ lvp_inline_uniforms(nir_shader *nir, const struct lvp_shader *shader, const uint
                      if (offset == uniform_dw_offsets[i]) {
                         b.cursor = nir_before_instr(&intr->instr);
                         nir_def *def = nir_imm_int(&b, uniform_values[i]);
-                        nir_def_rewrite_uses(&intr->dest.ssa, def);
+                        nir_def_rewrite_uses(&intr->def, def);
                         nir_instr_remove(&intr->instr);
                         break;
                      }
@@ -236,7 +236,7 @@ lvp_inline_uniforms(nir_shader *nir, const struct lvp_shader *shader, const uint
                      for (unsigned i = 0; i < num_components; i++) {
                         if (!components[i]) {
                            uint32_t scalar_offset = (offset + i) * 4;
-                           components[i] = nir_load_ubo(&b, 1, intr->dest.ssa.bit_size,
+                           components[i] = nir_load_ubo(&b, 1, intr->def.bit_size,
                                                         intr->src[0].ssa,
                                                         nir_imm_int(&b, scalar_offset));
                            nir_intrinsic_instr *load =
@@ -248,7 +248,7 @@ lvp_inline_uniforms(nir_shader *nir, const struct lvp_shader *shader, const uint
                      }

                      /* Replace the original uniform load. */
-                     nir_def_rewrite_uses(&intr->dest.ssa,
+                     nir_def_rewrite_uses(&intr->def,
                                           nir_vec(&b, components, num_components));
                      nir_instr_remove(&intr->instr);
                   }
diff --git a/src/gallium/frontends/rusticl/rusticl_nir.c b/src/gallium/frontends/rusticl/rusticl_nir.c
index a67d736..7ec184b 100644
--- a/src/gallium/frontends/rusticl/rusticl_nir.c
+++ b/src/gallium/frontends/rusticl/rusticl_nir.c
@@ -68,7 +68,7 @@ rusticl_lower_intrinsics_instr(
     case nir_intrinsic_load_work_dim:
         assert(state->work_dim);
         return nir_u2uN(b, nir_load_var(b, state->work_dim),
-                        intrins->dest.ssa.bit_size);
+                        intrins->def.bit_size);
     default:
         return NULL;
     }
@@ -95,9 +95,9 @@ rusticl_lower_input_instr(struct nir_builder *b, nir_instr *instr, void *_)
     nir_def *ubo_idx = nir_imm_int(b, 0);
     nir_def *uniform_offset = nir_ssa_for_src(b, intrins->src[0], 1);

-    assert(intrins->dest.ssa.bit_size >= 8);
+    assert(intrins->def.bit_size >= 8);
     nir_def *load_result =
-        nir_load_ubo(b, intrins->num_components, intrins->dest.ssa.bit_size,
+        nir_load_ubo(b, intrins->num_components, intrins->def.bit_size,
                      ubo_idx, nir_iadd_imm(b, uniform_offset, nir_intrinsic_base(intrins)));

     nir_intrinsic_instr *load = nir_instr_as_intrinsic(load_result->parent_instr);
diff --git a/src/imagination/rogue/nir/rogue_nir_lower_io.c b/src/imagination/rogue/nir/rogue_nir_lower_io.c
index f2f8873..2a09339 100644
--- a/src/imagination/rogue/nir/rogue_nir_lower_io.c
+++ b/src/imagination/rogue/nir/rogue_nir_lower_io.c
@@ -49,7 +49,7 @@ static void lower_vulkan_resource_index(nir_builder *b,
                          nir_imm_int(b, desc_set),
                          nir_imm_int(b, binding),
                          nir_imm_int(b, desc_type));
-   nir_def_rewrite_uses(&intr->dest.ssa, def);
+   nir_def_rewrite_uses(&intr->def, def);
    nir_instr_remove(&intr->instr);
 }

@@ -66,8 +66,8 @@ static void lower_load_global_constant_to_scalar(nir_builder *b,
    for (uint8_t i = 0; i < intr->num_components; i++) {
       nir_intrinsic_instr *chan_intr =
          nir_intrinsic_instr_create(b->shader, intr->intrinsic);
-      nir_def_init(&chan_intr->instr, &chan_intr->dest.ssa, 1,
-                   intr->dest.ssa.bit_size);
+      nir_def_init(&chan_intr->instr, &chan_intr->def, 1,
+                   intr->def.bit_size);
       chan_intr->num_components = 1;

       nir_intrinsic_set_access(chan_intr, nir_intrinsic_access(intr));
@@ -81,10 +81,10 @@ static void lower_load_global_constant_to_scalar(nir_builder *b,

       nir_builder_instr_insert(b, &chan_intr->instr);

-      loads[i] = &chan_intr->dest.ssa;
+      loads[i] = &chan_intr->def;
    }

-   nir_def_rewrite_uses(&intr->dest.ssa,
+   nir_def_rewrite_uses(&intr->def,
                         nir_vec(b, loads, intr->num_components));
    nir_instr_remove(&intr->instr);
 }
diff --git a/src/imagination/rogue/rogue_compile.c b/src/imagination/rogue/rogue_compile.c
index 2a204d8..2019e0a 100644
--- a/src/imagination/rogue/rogue_compile.c
+++ b/src/imagination/rogue/rogue_compile.c
@@ -144,10 +144,10 @@ static void trans_nir_intrinsic_load_input_fs(rogue_builder *b,
 {
    struct rogue_fs_build_data *fs_data = &b->shader->ctx->stage_data.fs;

-   unsigned load_size = intr->dest.ssa.num_components;
+   unsigned load_size = intr->def.num_components;
    assert(load_size == 1); /* TODO: We can support larger load sizes. */

-   rogue_reg *dst = rogue_ssa_reg(b->shader, intr->dest.ssa.index);
+   rogue_reg *dst = rogue_ssa_reg(b->shader, intr->def.index);

    struct nir_io_semantics io_semantics = nir_intrinsic_io_semantics(intr);
    unsigned component = nir_intrinsic_component(intr);
@@ -178,10 +178,10 @@ static void trans_nir_intrinsic_load_input_vs(rogue_builder *b,
    struct pvr_pipeline_layout *pipeline_layout =
       b->shader->ctx->pipeline_layout;

-   ASSERTED unsigned load_size = intr->dest.ssa.num_components;
+   ASSERTED unsigned load_size = intr->def.num_components;
    assert(load_size == 1); /* TODO: We can support larger load sizes. */

-   rogue_reg *dst = rogue_ssa_reg(b->shader, intr->dest.ssa.index);
+   rogue_reg *dst = rogue_ssa_reg(b->shader, intr->def.index);

    struct nir_io_semantics io_semantics = nir_intrinsic_io_semantics(intr);
    unsigned input = io_semantics.location - VERT_ATTRIB_GENERIC0;
@@ -520,7 +520,7 @@ trans_nir_intrinsic_load_vulkan_descriptor(rogue_builder *b,
                    rogue_ref_reg(desc_addr_offset_val_hi),
                    rogue_ref_io(ROGUE_IO_NONE));

-   unsigned desc_addr_idx = intr->dest.ssa.index;
+   unsigned desc_addr_idx = intr->def.index;
    rogue_regarray *desc_addr_64 = rogue_ssa_vec_regarray(b->shader, 2, desc_addr_idx, 0);
    instr = &rogue_LD(b,
@@ -540,7 +540,7 @@ static void trans_nir_intrinsic_load_global_constant(rogue_builder *b,
    rogue_regarray *src = rogue_ssa_vec_regarray(b->shader, 2, src_index, 0);

    /*** TODO NEXT: this could be either a reg or regarray. ***/
-   rogue_reg *dst = rogue_ssa_reg(b->shader, intr->dest.ssa.index);
+   rogue_reg *dst = rogue_ssa_reg(b->shader, intr->def.index);

    /* TODO NEXT: src[1] should be depending on ssa vec size for burst loads */
    rogue_instr *instr = &rogue_LD(b,
diff --git a/src/intel/blorp/blorp.c b/src/intel/blorp/blorp.c
index 657b34e..dfbc6f9 100644
--- a/src/intel/blorp/blorp.c
+++ b/src/intel/blorp/blorp.c
@@ -356,7 +356,7 @@ lower_base_workgroup_id(nir_builder *b, nir_instr *instr, UNUSED void *data)
       return false;

    b->cursor = nir_instr_remove(&intrin->instr);
-   nir_def_rewrite_uses(&intrin->dest.ssa, nir_imm_zero(b, 3, 32));
+   nir_def_rewrite_uses(&intrin->def, nir_imm_zero(b, 3, 32));
    return true;
 }
diff --git a/src/intel/blorp/blorp_blit.c b/src/intel/blorp/blorp_blit.c
index 28f5834..baa1535 100644
--- a/src/intel/blorp/blorp_blit.c
+++ b/src/intel/blorp/blorp_blit.c
@@ -163,7 +163,7 @@ blorp_create_nir_tex_instr(nir_builder *b, struct brw_blorp_blit_vars *v,
    tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_coord, pos);
    tex->coord_components = 3;

-   nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
+   nir_def_init(&tex->instr, &tex->def, 4, 32);

    return tex;
 }
@@ -189,7 +189,7 @@ blorp_nir_tex(nir_builder *b, struct brw_blorp_blit_vars *v,

    nir_builder_instr_insert(b, &tex->instr);

-   return &tex->dest.ssa;
+   return &tex->def;
 }

 static nir_def *
@@ -204,7 +204,7 @@ blorp_nir_txf(nir_builder *b, struct brw_blorp_blit_vars *v,

    nir_builder_instr_insert(b, &tex->instr);

-   return &tex->dest.ssa;
+   return &tex->def;
 }

 static nir_def *
@@ -231,7 +231,7 @@ blorp_nir_txf_ms(nir_builder *b, struct brw_blorp_blit_vars *v,

    nir_builder_instr_insert(b, &tex->instr);

-   return &tex->dest.ssa;
+   return &tex->def;
 }

 static nir_def *
@@ -246,7 +246,7 @@ blorp_blit_txf_ms_mcs(nir_builder *b, struct brw_blorp_blit_vars *v,

    nir_builder_instr_insert(b, &tex->instr);

-   return &tex->dest.ssa;
+   return &tex->def;
 }

 /**
diff --git a/src/intel/blorp/blorp_nir_builder.h b/src/intel/blorp/blorp_nir_builder.h
index 936bc12..d40f702 100644
--- a/src/intel/blorp/blorp_nir_builder.h
+++ b/src/intel/blorp/blorp_nir_builder.h
@@ -61,10 +61,10 @@ blorp_nir_txf_ms_mcs(nir_builder *b, nir_def *xy_pos, nir_def *layer)
    tex->texture_index = 0;
    tex->sampler_index = 0;

-   nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
+   nir_def_init(&tex->instr, &tex->def, 4, 32);

    nir_builder_instr_insert(b, &tex->instr);

-   return &tex->dest.ssa;
+   return &tex->def;
 }

 static inline nir_def *
diff --git a/src/intel/compiler/brw_fs.cpp b/src/intel/compiler/brw_fs.cpp
index 3794741..5e9f798 100644
--- a/src/intel/compiler/brw_fs.cpp
+++ b/src/intel/compiler/brw_fs.cpp
@@ -7404,7 +7404,7 @@ brw_compute_barycentric_interp_modes(const struct intel_device_info *devinfo,
          }

          /* Ignore WPOS; it doesn't require interpolation. */
-         if (!is_used_in_not_interp_frag_coord(&intrin->dest.ssa))
+         if (!is_used_in_not_interp_frag_coord(&intrin->def))
             continue;

          nir_intrinsic_op bary_op = intrin->intrinsic;
diff --git a/src/intel/compiler/brw_fs_nir.cpp b/src/intel/compiler/brw_fs_nir.cpp
index 0a5fdf1..c55b733 100644
--- a/src/intel/compiler/brw_fs_nir.cpp
+++ b/src/intel/compiler/brw_fs_nir.cpp
@@ -2016,7 +2016,7 @@ fs_visitor::get_nir_src(const nir_src &src)
       /* We don't handle indirects on locals */
       assert(nir_intrinsic_base(load_reg) == 0);
       assert(load_reg->intrinsic != nir_intrinsic_load_reg_indirect);
-      reg = nir_ssa_values[decl_reg->dest.ssa.index];
+      reg = nir_ssa_values[decl_reg->def.index];
    }

    if (nir_src_bit_size(src) == 64 && devinfo->ver == 7) {
@@ -2071,7 +2071,7 @@ fs_visitor::get_nir_def(const nir_def &def)
       /* We don't handle indirects on locals */
       assert(nir_intrinsic_base(store_reg) == 0);
       assert(store_reg->intrinsic != nir_intrinsic_store_reg_indirect);
-      return nir_ssa_values[decl_reg->dest.ssa.index];
+      return nir_ssa_values[decl_reg->def.index];
    }
 }
@@ -2615,7 +2615,7 @@ fs_visitor::nir_emit_vs_intrinsic(const fs_builder &bld,
    fs_reg dest;
    if (nir_intrinsic_infos[instr->intrinsic].has_dest)
-      dest = get_nir_def(instr->dest.ssa);
+      dest = get_nir_def(instr->def);

    switch (instr->intrinsic) {
    case nir_intrinsic_load_vertex_id:
@@ -2623,7 +2623,7 @@ fs_visitor::nir_emit_vs_intrinsic(const fs_builder &bld,
       unreachable("should be lowered by nir_lower_system_values()");

    case nir_intrinsic_load_input: {
-      assert(instr->dest.ssa.bit_size == 32);
+      assert(instr->def.bit_size == 32);
       fs_reg src = fs_reg(ATTR, nir_intrinsic_base(instr) * 4, dest.type);
       src = offset(src, bld, nir_intrinsic_component(instr));
       src = offset(src, bld, nir_src_as_uint(instr->src[0]));
@@ -2750,7 +2750,7 @@ fs_visitor::nir_emit_tcs_intrinsic(const fs_builder &bld,
    fs_reg dst;
    if (nir_intrinsic_infos[instr->intrinsic].has_dest)
-      dst = get_nir_def(instr->dest.ssa);
+      dst = get_nir_def(instr->def);

    switch (instr->intrinsic) {
    case nir_intrinsic_load_primitive_id:
@@ -2774,7 +2774,7 @@ fs_visitor::nir_emit_tcs_intrinsic(const fs_builder &bld,
       break;

    case nir_intrinsic_load_per_vertex_input: {
-      assert(instr->dest.ssa.bit_size == 32);
+      assert(instr->def.bit_size == 32);
       fs_reg indirect_offset = get_indirect_offset(instr);
       unsigned imm_offset = nir_intrinsic_base(instr);
       fs_inst *inst;
@@ -2851,7 +2851,7 @@ fs_visitor::nir_emit_tcs_intrinsic(const fs_builder &bld,
    case nir_intrinsic_load_output:
    case nir_intrinsic_load_per_vertex_output: {
-      assert(instr->dest.ssa.bit_size == 32);
+      assert(instr->def.bit_size == 32);
       fs_reg indirect_offset = get_indirect_offset(instr);
       unsigned imm_offset = nir_intrinsic_base(instr);
       unsigned first_component = nir_intrinsic_component(instr);
@@ -2980,7 +2980,7 @@ fs_visitor::nir_emit_tes_intrinsic(const fs_builder &bld,
    fs_reg dest;
    if (nir_intrinsic_infos[instr->intrinsic].has_dest)
-      dest = get_nir_def(instr->dest.ssa);
+      dest = get_nir_def(instr->def);

    switch (instr->intrinsic) {
    case nir_intrinsic_load_primitive_id:
@@ -2994,7 +2994,7 @@ fs_visitor::nir_emit_tes_intrinsic(const fs_builder &bld,
    case nir_intrinsic_load_input:
    case nir_intrinsic_load_per_vertex_input: {
-      assert(instr->dest.ssa.bit_size == 32);
+      assert(instr->def.bit_size == 32);
       fs_reg indirect_offset = get_indirect_offset(instr);
       unsigned imm_offset = nir_intrinsic_base(instr);
       unsigned first_component = nir_intrinsic_component(instr);
@@ -3088,7 +3088,7 @@ fs_visitor::nir_emit_gs_intrinsic(const fs_builder &bld,
    fs_reg dest;
    if (nir_intrinsic_infos[instr->intrinsic].has_dest)
-      dest = get_nir_def(instr->dest.ssa);
+      dest = get_nir_def(instr->def);

    switch (instr->intrinsic) {
    case nir_intrinsic_load_primitive_id:
@@ -3312,7 +3312,7 @@ fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld,
    fs_reg dest;
    if (nir_intrinsic_infos[instr->intrinsic].has_dest)
-      dest = get_nir_def(instr->dest.ssa);
+      dest = get_nir_def(instr->def);

    switch (instr->intrinsic) {
    case nir_intrinsic_load_front_face:
@@ -3481,7 +3481,7 @@ fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld,
       /* In Fragment Shaders load_input is used either for flat inputs or
        * per-primitive inputs.
        */
-      assert(instr->dest.ssa.bit_size == 32);
+      assert(instr->def.bit_size == 32);
       unsigned base = nir_intrinsic_base(instr);
       unsigned comp = nir_intrinsic_component(instr);
       unsigned num_components = instr->num_components;
@@ -3686,7 +3686,7 @@ fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld,
    fs_reg dest;
    if (nir_intrinsic_infos[instr->intrinsic].has_dest)
-      dest = get_nir_def(instr->dest.ssa);
+      dest = get_nir_def(instr->def);

    switch (instr->intrinsic) {
    case nir_intrinsic_barrier:
@@ -3732,7 +3732,7 @@ fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld,
    }

    case nir_intrinsic_load_num_workgroups: {
-      assert(instr->dest.ssa.bit_size == 32);
+      assert(instr->def.bit_size == 32);
       cs_prog_data->uses_num_work_groups = true;
@@ -3758,7 +3758,7 @@ fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld,
    case nir_intrinsic_load_shared: {
       assert(devinfo->ver >= 7);
-      const unsigned bit_size = instr->dest.ssa.bit_size;
+      const unsigned bit_size = instr->def.bit_size;
       fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
       srcs[SURFACE_LOGICAL_SRC_SURFACE] = brw_imm_ud(GFX7_BTI_SLM);
@@ -3783,14 +3783,14 @@ fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld,
       assert(nir_intrinsic_align(instr) > 0);
       if (bit_size == 32 &&
           nir_intrinsic_align(instr) >= 4) {
-         assert(instr->dest.ssa.num_components <= 4);
+         assert(instr->def.num_components <= 4);
          srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
          fs_inst *inst =
             bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL,
                      dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
          inst->size_written = instr->num_components * dispatch_width * 4;
       } else {
-         assert(instr->dest.ssa.num_components == 1);
+         assert(instr->def.num_components == 1);
          srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(bit_size);

          fs_reg read_result = bld.vgrf(BRW_REGISTER_TYPE_UD);
@@ -3898,7 +3898,7 @@ fs_visitor::nir_emit_bs_intrinsic(const fs_builder &bld,
    fs_reg dest;
    if (nir_intrinsic_infos[instr->intrinsic].has_dest)
-      dest = get_nir_def(instr->dest.ssa);
+      dest = get_nir_def(instr->def);

    switch (instr->intrinsic) {
    case nir_intrinsic_load_btd_global_arg_addr_intel:
@@ -4332,36 +4332,36 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
                                   BRW_REGISTER_TYPE_F);

       /* Re-use the destination's slot in the table for the register */
-      nir_ssa_values[instr->dest.ssa.index] =
+      nir_ssa_values[instr->def.index] =
          bld.vgrf(reg_type, num_components);
       return;
    }

    fs_reg dest;
    if (nir_intrinsic_infos[instr->intrinsic].has_dest)
-      dest = get_nir_def(instr->dest.ssa);
+      dest = get_nir_def(instr->def);

    switch (instr->intrinsic) {
    case nir_intrinsic_resource_intel:
-      nir_ssa_bind_infos[instr->dest.ssa.index].valid = true;
-      nir_ssa_bind_infos[instr->dest.ssa.index].bindless =
+      nir_ssa_bind_infos[instr->def.index].valid = true;
+      nir_ssa_bind_infos[instr->def.index].bindless =
         (nir_intrinsic_resource_access_intel(instr) &
          nir_resource_intel_bindless) != 0;
-      nir_ssa_bind_infos[instr->dest.ssa.index].block =
+      nir_ssa_bind_infos[instr->def.index].block =
         nir_intrinsic_resource_block_intel(instr);
-      nir_ssa_bind_infos[instr->dest.ssa.index].set =
+      nir_ssa_bind_infos[instr->def.index].set =
         nir_intrinsic_desc_set(instr);
-      nir_ssa_bind_infos[instr->dest.ssa.index].binding =
+      nir_ssa_bind_infos[instr->def.index].binding =
         nir_intrinsic_binding(instr);

       if (nir_intrinsic_resource_access_intel(instr) &
           nir_resource_intel_non_uniform) {
-         nir_resource_values[instr->dest.ssa.index] = fs_reg();
+         nir_resource_values[instr->def.index] = fs_reg();
       } else {
-         nir_resource_values[instr->dest.ssa.index] =
+         nir_resource_values[instr->def.index] =
            try_rebuild_resource(bld, instr->src[1].ssa);
       }
-      nir_ssa_values[instr->dest.ssa.index] =
+      nir_ssa_values[instr->def.index] =
         nir_ssa_values[instr->src[1].ssa->index];
       break;
@@ -4483,7 +4483,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
                 tmp, srcs, ARRAY_SIZE(srcs));
       inst->size_written = 4 * REG_SIZE;

-      for (unsigned c = 0; c < instr->dest.ssa.num_components; ++c) {
+      for (unsigned c = 0; c < instr->def.num_components; ++c) {
          bld.MOV(offset(retype(dest, tmp.type), bld, c),
                  component(offset(tmp, ubld, c), 0));
       }
@@ -4889,7 +4889,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
             VARYING_PULL_CONSTANT_LOAD(bld, offset(dest, bld, i),
                                        surface, surface_handle,
                                        base_offset, i * type_sz(dest.type),
-                                       instr->dest.ssa.bit_size / 8);
+                                       instr->def.bit_size / 8);

          prog_data->has_ubo_pull = true;
       } else {
@@ -5021,7 +5021,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
    case nir_intrinsic_load_global_constant: {
       assert(devinfo->ver >= 8);

-      assert(instr->dest.ssa.bit_size <= 32);
+      assert(instr->def.bit_size <= 32);
       assert(nir_intrinsic_align(instr) > 0);
       fs_reg srcs[A64_LOGICAL_NUM_SRCS];
       srcs[A64_LOGICAL_ADDRESS] = get_nir_src(instr->src[0]);
@@ -5029,9 +5029,9 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
       srcs[A64_LOGICAL_ENABLE_HELPERS] =
          brw_imm_ud(nir_intrinsic_access(instr) & ACCESS_INCLUDE_HELPERS);

-      if (instr->dest.ssa.bit_size == 32 &&
+      if (instr->def.bit_size == 32 &&
           nir_intrinsic_align(instr) >= 4) {
-         assert(instr->dest.ssa.num_components <= 4);
+         assert(instr->def.num_components <= 4);

          srcs[A64_LOGICAL_ARG] = brw_imm_ud(instr->num_components);

@@ -5041,8 +5041,8 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
          inst->size_written = instr->num_components *
                               inst->dst.component_size(inst->exec_size);
       } else {
-         const unsigned bit_size = instr->dest.ssa.bit_size;
-         assert(instr->dest.ssa.num_components == 1);
+         const unsigned bit_size = instr->def.bit_size;
+         assert(instr->def.num_components == 1);
          fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD);

          srcs[A64_LOGICAL_ARG] = brw_imm_ud(bit_size);
@@ -5099,7 +5099,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
       break;

    case nir_intrinsic_load_global_const_block_intel: {
-      assert(instr->dest.ssa.bit_size == 32);
+      assert(instr->def.bit_size == 32);
       assert(instr->num_components == 8 || instr->num_components == 16);

       const fs_builder ubld = bld.exec_all().group(instr->num_components, 0);
@@ -5204,7 +5204,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
    case nir_intrinsic_load_ssbo: {
       assert(devinfo->ver >= 7);

-      const unsigned bit_size = instr->dest.ssa.bit_size;
+      const unsigned bit_size = instr->def.bit_size;
       fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
       srcs[get_nir_src_bindless(instr->src[0]) ?
            SURFACE_LOGICAL_SRC_SURFACE_HANDLE :
@@ -5222,14 +5222,14 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
       assert(nir_intrinsic_align(instr) > 0);
       if (bit_size == 32 &&
           nir_intrinsic_align(instr) >= 4) {
-         assert(instr->dest.ssa.num_components <= 4);
+         assert(instr->def.num_components <= 4);
          srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
          fs_inst *inst =
             bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL,
                      dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
          inst->size_written = instr->num_components * dispatch_width * 4;
       } else {
-         assert(instr->dest.ssa.num_components == 1);
+         assert(instr->def.num_components == 1);
          srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(bit_size);

          fs_reg read_result = bld.vgrf(BRW_REGISTER_TYPE_UD);
@@ -5431,8 +5431,8 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
    case nir_intrinsic_load_scratch: {
       assert(devinfo->ver >= 7);

-      assert(instr->dest.ssa.num_components == 1);
-      const unsigned bit_size = instr->dest.ssa.bit_size;
+      assert(instr->def.num_components == 1);
+      const unsigned bit_size = instr->def.bit_size;
       fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];

       if (devinfo->verx10 >= 125) {
@@ -5458,7 +5458,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
       dest.type = brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD);

       /* Read the vector */
-      assert(instr->dest.ssa.num_components == 1);
+      assert(instr->def.num_components == 1);
       assert(bit_size <= 32);
       assert(nir_intrinsic_align(instr) > 0);
       if (bit_size == 32 &&
@@ -5712,7 +5712,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
       bld.exec_all().group(1, 0).MOV(flag, brw_imm_ud(0u));
       bld.CMP(bld.null_reg_ud(), value, brw_imm_ud(0u), BRW_CONDITIONAL_NZ);

-      if (instr->dest.ssa.bit_size > 32) {
+      if (instr->def.bit_size > 32) {
          dest.type = BRW_REGISTER_TYPE_UQ;
       } else {
          dest.type = BRW_REGISTER_TYPE_UD;
@@ -5947,7 +5947,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
    }

    case nir_intrinsic_load_global_block_intel: {
-      assert(instr->dest.ssa.bit_size == 32);
+      assert(instr->def.bit_size == 32);

       fs_reg address = bld.emit_uniformize(get_nir_src(instr->src[0]));

@@ -6021,7 +6021,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
    case nir_intrinsic_load_shared_block_intel:
    case nir_intrinsic_load_ssbo_block_intel: {
-      assert(instr->dest.ssa.bit_size == 32);
+      assert(instr->def.bit_size == 32);

       const bool is_ssbo =
          instr->intrinsic == nir_intrinsic_load_ssbo_block_intel;
@@ -6287,12 +6287,12 @@ fs_visitor::nir_emit_surface_atomic(const fs_builder &bld,
     *
     * 16-bit float atomics are supported, however.
     */
-   assert(instr->dest.ssa.bit_size == 32 ||
-          (instr->dest.ssa.bit_size == 64 && devinfo->has_lsc) ||
-          (instr->dest.ssa.bit_size == 16 &&
+   assert(instr->def.bit_size == 32 ||
+          (instr->def.bit_size == 64 && devinfo->has_lsc) ||
+          (instr->def.bit_size == 16 &&
           (devinfo->has_lsc || lsc_opcode_is_atomic_float(op))));

-   fs_reg dest = get_nir_def(instr->dest.ssa);
+   fs_reg dest = get_nir_def(instr->def);

    fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
    srcs[bindless ?
@@ -6336,7 +6336,7 @@ fs_visitor::nir_emit_surface_atomic(const fs_builder &bld,

    /* Emit the actual atomic operation */

-   switch (instr->dest.ssa.bit_size) {
+   switch (instr->def.bit_size) {
    case 16: {
       fs_reg dest32 = bld.vgrf(BRW_REGISTER_TYPE_UD);
       bld.emit(SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL,
@@ -6363,7 +6363,7 @@ fs_visitor::nir_emit_global_atomic(const fs_builder &bld,
 {
    int op = lsc_aop_for_nir_intrinsic(instr);

-   fs_reg dest = get_nir_def(instr->dest.ssa);
+   fs_reg dest = get_nir_def(instr->def);

    fs_reg addr = get_nir_src(instr->src[0]);

@@ -6387,7 +6387,7 @@ fs_visitor::nir_emit_global_atomic(const fs_builder &bld,
    srcs[A64_LOGICAL_ARG] = brw_imm_ud(op);
    srcs[A64_LOGICAL_ENABLE_HELPERS] = brw_imm_ud(0);

-   switch (instr->dest.ssa.bit_size) {
+   switch (instr->def.bit_size) {
    case 16: {
       fs_reg dest32 = bld.vgrf(BRW_REGISTER_TYPE_UD);
       bld.emit(SHADER_OPCODE_A64_UNTYPED_ATOMIC_LOGICAL,
@@ -6644,7 +6644,7 @@ fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr)
       opcode = SHADER_OPCODE_SAMPLEINFO_LOGICAL;
       break;
    case nir_texop_samples_identical: {
-      fs_reg dst = retype(get_nir_def(instr->dest.ssa), BRW_REGISTER_TYPE_D);
+      fs_reg dst = retype(get_nir_def(instr->def), BRW_REGISTER_TYPE_D);

       /* If mcs is an immediate value, it means there is no MCS.  In that case
        * just return false.
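All of the hunks above are instances of one callback shape, so it is worth spelling it out once. A minimal sketch of a lowering pass after the rename, modeled on the lower_baseinstance_instr hunk earlier in this patch and assuming the usual nir_shader_instructions_pass() driver loop (the choice of intrinsic is only illustrative):

static bool
lower_example_instr(nir_builder *b, nir_instr *instr, void *data)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
   if (intr->intrinsic != nir_intrinsic_load_instance_id)
      return false;

   b->cursor = nir_after_instr(instr);

   /* The destination is now the nir_def embedded in the instruction;
    * bit_size and num_components hang directly off intr->def instead of
    * intr->dest.ssa. */
   nir_def *def = nir_isub(b, &intr->def, nir_load_base_instance(b));

   /* Rewrite only the uses that follow the replacement, since the
    * original load still feeds the subtraction. */
   nir_def_rewrite_uses_after(&intr->def, def, def->parent_instr);
   return true;
}
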
@@ -6685,7 +6685,7 @@ fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr)
    const unsigned dest_size = nir_tex_instr_dest_size(instr);
    if (devinfo->ver >= 9 &&
        instr->op != nir_texop_tg4 && instr->op != nir_texop_query_levels) {
-      unsigned write_mask = nir_def_components_read(&instr->dest.ssa);
+      unsigned write_mask = nir_def_components_read(&instr->def);
       assert(write_mask != 0); /* dead code should have been eliminated */
       if (instr->is_sparse) {
          inst->size_written = (util_last_bit(write_mask) - 1) *
@@ -6736,7 +6736,7 @@ fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr)
    if (instr->is_sparse)
       nir_dest[dest_size - 1] = component(offset(dst, bld, dest_size - 1), 0);

-   bld.LOAD_PAYLOAD(get_nir_def(instr->dest.ssa), nir_dest, dest_size, 0);
+   bld.LOAD_PAYLOAD(get_nir_def(instr->def), nir_dest, dest_size, 0);
 }

 void
diff --git a/src/intel/compiler/brw_kernel.c b/src/intel/compiler/brw_kernel.c
index 721dbb8..ea0aa2d 100644
--- a/src/intel/compiler/brw_kernel.c
+++ b/src/intel/compiler/brw_kernel.c
@@ -86,15 +86,15 @@ implement_atomic_builtin(nir_function *func, nir_atomic_op atomic_op,
       if (i == 0) {
          /* The first source is our deref */
          assert(nir_intrinsic_infos[op].src_components[i] == -1);
-         src = &nir_build_deref_cast(&b, src, mode, data_type, 0)->dest.ssa;
+         src = &nir_build_deref_cast(&b, src, mode, data_type, 0)->def;
       }

       atomic->src[i] = nir_src_for_ssa(src);
    }

-   nir_def_init_for_type(&atomic->instr, &atomic->dest.ssa, data_type);
+   nir_def_init_for_type(&atomic->instr, &atomic->def, data_type);
    nir_builder_instr_insert(&b, &atomic->instr);

-   nir_store_deref(&b, ret, &atomic->dest.ssa, ~0);
+   nir_store_deref(&b, ret, &atomic->def, ~0);
 }

 static void
@@ -110,10 +110,10 @@ implement_sub_group_ballot_builtin(nir_function *func)
       nir_intrinsic_instr_create(b.shader, nir_intrinsic_ballot);
    ballot->src[0] = nir_src_for_ssa(cond);
    ballot->num_components = 1;
-   nir_def_init(&ballot->instr, &ballot->dest.ssa, 1, 32);
+   nir_def_init(&ballot->instr, &ballot->def, 1, 32);
    nir_builder_instr_insert(&b, &ballot->instr);

-   nir_store_deref(&b, ret, &ballot->dest.ssa, ~0);
+   nir_store_deref(&b, ret, &ballot->def, ~0);
 }

 static bool
@@ -182,12 +182,12 @@ lower_kernel_intrinsics(nir_shader *nir)
             load->src[0] = nir_src_for_ssa(nir_u2u32(&b, intrin->src[0].ssa));
             nir_intrinsic_set_base(load, kernel_arg_start);
             nir_intrinsic_set_range(load, nir->num_uniforms);
-            nir_def_init(&load->instr, &load->dest.ssa,
-                         intrin->dest.ssa.num_components,
-                         intrin->dest.ssa.bit_size);
+            nir_def_init(&load->instr, &load->def,
+                         intrin->def.num_components,
+                         intrin->def.bit_size);
             nir_builder_instr_insert(&b, &load->instr);

-            nir_def_rewrite_uses(&intrin->dest.ssa, &load->dest.ssa);
+            nir_def_rewrite_uses(&intrin->def, &load->def);
             progress = true;
             break;
          }
@@ -197,7 +197,7 @@ lower_kernel_intrinsics(nir_shader *nir)
             nir_def *const_data_base_addr = nir_pack_64_2x32_split(&b,
                nir_load_reloc_const_intel(&b, BRW_SHADER_RELOC_CONST_DATA_ADDR_LOW),
                nir_load_reloc_const_intel(&b, BRW_SHADER_RELOC_CONST_DATA_ADDR_HIGH));
-            nir_def_rewrite_uses(&intrin->dest.ssa, const_data_base_addr);
+            nir_def_rewrite_uses(&intrin->def, const_data_base_addr);
             progress = true;
             break;
          }
@@ -212,14 +212,14 @@ lower_kernel_intrinsics(nir_shader *nir)
             nir_intrinsic_set_base(load, kernel_sysvals_start +
                                    offsetof(struct brw_kernel_sysvals, num_work_groups));
             nir_intrinsic_set_range(load, 3 * 4);
-            nir_def_init(&load->instr, &load->dest.ssa, 3, 32);
+            nir_def_init(&load->instr, &load->def, 3, 32);
             nir_builder_instr_insert(&b, &load->instr);

             /* We may need to do a bit-size cast here */
             nir_def *num_work_groups =
-               nir_u2uN(&b, &load->dest.ssa, intrin->dest.ssa.bit_size);
+               nir_u2uN(&b, &load->def, intrin->def.bit_size);

-            nir_def_rewrite_uses(&intrin->dest.ssa, num_work_groups);
+            nir_def_rewrite_uses(&intrin->def, num_work_groups);
             progress = true;
             break;
          }
diff --git a/src/intel/compiler/brw_mesh.cpp b/src/intel/compiler/brw_mesh.cpp
index e5ae3f2..5c84c1c 100644
--- a/src/intel/compiler/brw_mesh.cpp
+++ b/src/intel/compiler/brw_mesh.cpp
@@ -54,8 +54,8 @@ brw_nir_lower_load_uniforms_impl(nir_builder *b, nir_instr *instr,

    /* Read the first few 32-bit scalars from InlineData. */
    if (nir_src_is_const(intrin->src[0]) &&
-       intrin->dest.ssa.bit_size == 32 &&
-       intrin->dest.ssa.num_components == 1) {
+       intrin->def.bit_size == 32 &&
+       intrin->def.num_components == 1) {
       unsigned off = nir_intrinsic_base(intrin) + nir_src_as_uint(intrin->src[0]);
       unsigned off_dw = off / 4;
       if (off % 4 == 0 && off_dw < BRW_TASK_MESH_PUSH_CONSTANTS_SIZE_DW) {
@@ -1383,7 +1383,7 @@ brw_pack_primitive_indices_instr(nir_builder *b, nir_instr *instr, void *data)
                            nir_ishl_imm(b, nir_channel(b, data_def, 2), 16));
    }

-   nir_build_store_deref(b, &new_array_deref->dest.ssa, new_data);
+   nir_build_store_deref(b, &new_array_deref->def, new_data);

    nir_instr_remove(instr);

@@ -1777,9 +1777,9 @@ static void
 emit_urb_direct_reads(const fs_builder &bld, nir_intrinsic_instr *instr,
                       const fs_reg &dest, fs_reg urb_handle)
 {
-   assert(instr->dest.ssa.bit_size == 32);
+   assert(instr->def.bit_size == 32);

-   unsigned comps = instr->dest.ssa.num_components;
+   unsigned comps = instr->def.num_components;
    if (comps == 0)
       return;

@@ -1819,9 +1819,9 @@ static void
 emit_urb_indirect_reads(const fs_builder &bld, nir_intrinsic_instr *instr,
                         const fs_reg &dest, const fs_reg &offset_src, fs_reg urb_handle)
 {
-   assert(instr->dest.ssa.bit_size == 32);
+   assert(instr->def.bit_size == 32);

-   unsigned comps = instr->dest.ssa.num_components;
+   unsigned comps = instr->def.num_components;
    if (comps == 0)
       return;

@@ -1914,7 +1914,7 @@ void
 fs_visitor::emit_task_mesh_load(const fs_builder &bld, nir_intrinsic_instr *instr,
                                 const fs_reg &urb_handle)
 {
-   fs_reg dest = get_nir_def(instr->dest.ssa);
+   fs_reg dest = get_nir_def(instr->def);
    nir_src *offset_nir_src = nir_get_io_offset_src(instr);

    /* TODO(mesh): for per_vertex and per_primitive, if we could keep around
@@ -1991,7 +1991,7 @@ fs_visitor::nir_emit_task_mesh_intrinsic(const fs_builder &bld,
    fs_reg dest;
    if (nir_intrinsic_infos[instr->intrinsic].has_dest)
-      dest = get_nir_def(instr->dest.ssa);
+      dest = get_nir_def(instr->def);

    switch (instr->intrinsic) {
    case nir_intrinsic_load_mesh_inline_data_intel: {
diff --git a/src/intel/compiler/brw_nir.c b/src/intel/compiler/brw_nir.c
index d9e0170..3eb06fc 100644
--- a/src/intel/compiler/brw_nir.c
+++ b/src/intel/compiler/brw_nir.c
@@ -43,7 +43,7 @@ remap_tess_levels(nir_builder *b, nir_intrinsic_instr *intr,
    if (write) {
       assert(intr->num_components == intr->src[0].ssa->num_components);
    } else {
-      assert(intr->num_components == intr->dest.ssa.num_components);
+      assert(intr->num_components == intr->def.num_components);
    }

    if (location == VARYING_SLOT_TESS_LEVEL_INNER) {
@@ -65,14 +65,14 @@ remap_tess_levels(nir_builder *b, nir_intrinsic_instr *intr,
             nir_def *y = nir_channel(b, intr->src[0].ssa, 1);
             src = nir_vec4(b, undef, undef, y, x);
             mask = !!(mask & WRITEMASK_X) << 3 | !!(mask & WRITEMASK_Y) << 2;
-         } else if (intr->dest.ssa.num_components > 1) {
-            assert(intr->dest.ssa.num_components == 2);
+         } else if (intr->def.num_components > 1) {
+            assert(intr->def.num_components == 2);

             intr->num_components = 4;
-            intr->dest.ssa.num_components = 4;
+            intr->def.num_components = 4;

             unsigned wz[2] = { 3, 2 };
-            dest = nir_swizzle(b, &intr->dest.ssa, wz, 2);
+            dest = nir_swizzle(b, &intr->def, wz, 2);
          } else {
             nir_intrinsic_set_component(intr, 3 - component);
          }
@@ -112,11 +112,11 @@ remap_tess_levels(nir_builder *b, nir_intrinsic_instr *intr,
             /* Don't overwrite the inner factor at DWord 4 for triangles */
             if (_primitive_mode == TESS_PRIMITIVE_TRIANGLES)
                mask &= ~WRITEMASK_X;
-         } else if (intr->dest.ssa.num_components > 1) {
-            assert(intr->dest.ssa.num_components == 4);
+         } else if (intr->def.num_components > 1) {
+            assert(intr->def.num_components == 4);

             unsigned wzyx[4] = { 3, 2, 1, 0 };
-            dest = nir_swizzle(b, &intr->dest.ssa, wzyx, 4);
+            dest = nir_swizzle(b, &intr->def, wzyx, 4);
          } else {
             nir_intrinsic_set_component(intr, 3 - component);
             out_of_bounds = component == 3 &&
@@ -147,7 +147,7 @@ remap_tess_levels(nir_builder *b, nir_intrinsic_instr *intr,
    if (out_of_bounds) {
       if (!write)
-         nir_def_rewrite_uses(&intr->dest.ssa, nir_undef(b, 1, 32));
+         nir_def_rewrite_uses(&intr->def, nir_undef(b, 1, 32));
       nir_instr_remove(&intr->instr);
    } else if (write) {
       nir_intrinsic_set_write_mask(intr, mask);
@@ -157,7 +157,7 @@ remap_tess_levels(nir_builder *b, nir_intrinsic_instr *intr,
                                nir_src_for_ssa(src));
       }
    } else if (dest) {
-      nir_def_rewrite_uses_after(&intr->dest.ssa, dest,
+      nir_def_rewrite_uses_after(&intr->def, dest,
                                  dest->parent_instr);
    }

@@ -328,11 +328,11 @@ brw_nir_lower_vs_inputs(nir_shader *nir,
             }

             load->num_components = 1;
-            nir_def_init(&load->instr, &load->dest.ssa, 1, 32);
+            nir_def_init(&load->instr, &load->def, 1, 32);
             nir_builder_instr_insert(&b, &load->instr);

-            nir_def_rewrite_uses(&intrin->dest.ssa,
-                                 &load->dest.ssa);
+            nir_def_rewrite_uses(&intrin->def,
+                                 &load->def);
             nir_instr_remove(&intrin->instr);
             break;
          }
@@ -456,7 +456,7 @@ lower_barycentric_per_sample(nir_builder *b,
    nir_def *centroid =
       nir_load_barycentric(b, nir_intrinsic_load_barycentric_sample,
                            nir_intrinsic_interp_mode(intrin));
-   nir_def_rewrite_uses(&intrin->dest.ssa, centroid);
+   nir_def_rewrite_uses(&intrin->def, centroid);
    nir_instr_remove(instr);
    return true;
 }
@@ -849,7 +849,7 @@ lower_bit_size_callback(const nir_instr *instr, UNUSED void *data)
        * to do if we were trying to do it in native 8-bit types and the
        * results are the same once we truncate to 8 bits at the end.
        */
-      if (intrin->dest.ssa.bit_size == 8)
+      if (intrin->def.bit_size == 8)
          return 16;
       return 0;

@@ -861,7 +861,7 @@ lower_bit_size_callback(const nir_instr *instr, UNUSED void *data)

    case nir_instr_type_phi: {
       nir_phi_instr *phi = nir_instr_as_phi(instr);
-      if (phi->dest.ssa.bit_size == 8)
+      if (phi->def.bit_size == 8)
          return 16;
       return 0;
    }
@@ -1077,7 +1077,7 @@ brw_nir_zero_inputs_instr(struct nir_builder *b, nir_instr *instr, void *data)

    nir_def *zero = nir_imm_zero(b, 1, 32);

-   nir_def_rewrite_uses(&intrin->dest.ssa, zero);
+   nir_def_rewrite_uses(&intrin->def, zero);

    nir_instr_remove(instr);

@@ -2053,7 +2053,7 @@ brw_nir_load_global_const(nir_builder *b, nir_intrinsic_instr *load_uniform,
 {
    assert(load_uniform->intrinsic == nir_intrinsic_load_uniform);

-   unsigned bit_size = load_uniform->dest.ssa.bit_size;
+   unsigned bit_size = load_uniform->def.bit_size;
    assert(bit_size >= 8 && bit_size % 8 == 0);
    unsigned byte_size = bit_size / 8;
    nir_def *sysval;
diff --git a/src/intel/compiler/brw_nir_analyze_ubo_ranges.c b/src/intel/compiler/brw_nir_analyze_ubo_ranges.c
index 5715c8c..c972b89 100644
--- a/src/intel/compiler/brw_nir_analyze_ubo_ranges.c
+++ b/src/intel/compiler/brw_nir_analyze_ubo_ranges.c
@@ -158,7 +158,7 @@ analyze_ubos_block(struct ubo_analysis_state *state, nir_block *block)

       /* The value might span multiple 32-byte chunks. */
       const int bytes = nir_intrinsic_dest_components(intrin) *
-                        (intrin->dest.ssa.bit_size / 8);
+                        (intrin->def.bit_size / 8);
       const int start = ROUND_DOWN_TO(byte_offset, 32);
       const int end = ALIGN(byte_offset + bytes, 32);
       const int chunks = (end - start) / 32;
diff --git a/src/intel/compiler/brw_nir_attribute_workarounds.c b/src/intel/compiler/brw_nir_attribute_workarounds.c
index c150d6e..12c30fd 100644
--- a/src/intel/compiler/brw_nir_attribute_workarounds.c
+++ b/src/intel/compiler/brw_nir_attribute_workarounds.c
@@ -48,7 +48,7 @@ apply_attr_wa_instr(nir_builder *b, nir_instr *instr, void *cb_data)

    b->cursor = nir_after_instr(instr);

-   nir_def *val = &intrin->dest.ssa;
+   nir_def *val = &intrin->def;

    /* Do GL_FIXED rescaling for GLES2.0.  Our GL_FIXED attributes
     * come in as floating point conversions of the integer values.
@@ -115,7 +115,7 @@ apply_attr_wa_instr(nir_builder *b, nir_instr *instr, void *cb_data)
                   : nir_u2f32(b, val);
    }

-   nir_def_rewrite_uses_after(&intrin->dest.ssa, val,
+   nir_def_rewrite_uses_after(&intrin->def, val,
                               val->parent_instr);

    return true;
diff --git a/src/intel/compiler/brw_nir_blockify_uniform_loads.c b/src/intel/compiler/brw_nir_blockify_uniform_loads.c
index 56c4250..4224f61 100644
--- a/src/intel/compiler/brw_nir_blockify_uniform_loads.c
+++ b/src/intel/compiler/brw_nir_blockify_uniform_loads.c
@@ -52,13 +52,13 @@ brw_nir_blockify_uniform_loads_instr(nir_builder *b,
       if (nir_src_is_divergent(intrin->src[1]))
          return false;

-      if (intrin->dest.ssa.bit_size != 32)
+      if (intrin->def.bit_size != 32)
         return false;

       /* Without the LSC, we can only do block loads of at least 4dwords (1
        * oword).
        */
-      if (!devinfo->has_lsc && intrin->dest.ssa.num_components < 4)
+      if (!devinfo->has_lsc && intrin->def.num_components < 4)
         return false;

       intrin->intrinsic =
@@ -75,7 +75,7 @@ brw_nir_blockify_uniform_loads_instr(nir_builder *b,
       if (nir_src_is_divergent(intrin->src[0]))
          return false;

-      if (intrin->dest.ssa.bit_size != 32)
+      if (intrin->def.bit_size != 32)
         return false;

       intrin->intrinsic = nir_intrinsic_load_shared_uniform_block_intel;
@@ -85,13 +85,13 @@ brw_nir_blockify_uniform_loads_instr(nir_builder *b,
       if (nir_src_is_divergent(intrin->src[0]))
          return false;

-      if (intrin->dest.ssa.bit_size != 32)
+      if (intrin->def.bit_size != 32)
         return false;

       /* Without the LSC, we can only do block loads of at least 4dwords (1
        * oword).
        */
-      if (!devinfo->has_lsc && intrin->dest.ssa.num_components < 4)
+      if (!devinfo->has_lsc && intrin->def.num_components < 4)
         return false;

       intrin->intrinsic = nir_intrinsic_load_global_constant_uniform_block_intel;
diff --git a/src/intel/compiler/brw_nir_clamp_image_1d_2d_array_sizes.c b/src/intel/compiler/brw_nir_clamp_image_1d_2d_array_sizes.c
index 798f336..569caec 100644
--- a/src/intel/compiler/brw_nir_clamp_image_1d_2d_array_sizes.c
+++ b/src/intel/compiler/brw_nir_clamp_image_1d_2d_array_sizes.c
@@ -53,7 +53,7 @@ brw_nir_clamp_image_1d_2d_array_sizes_instr(nir_builder *b,
          if (!nir_intrinsic_image_array(intr))
             break;

-         image_size = &intr->dest.ssa;
+         image_size = &intr->def;
          break;

       case nir_intrinsic_image_deref_size: {
@@ -64,7 +64,7 @@ brw_nir_clamp_image_1d_2d_array_sizes_instr(nir_builder *b,
          if (!glsl_sampler_type_is_array(deref->type))
            break;

-         image_size = &intr->dest.ssa;
+         image_size = &intr->def;
         break;
      }

@@ -82,7 +82,7 @@ brw_nir_clamp_image_1d_2d_array_sizes_instr(nir_builder *b,
       if (!tex_instr->is_array)
          break;

-      image_size = &tex_instr->dest.ssa;
+      image_size = &tex_instr->def;
       break;
    }
diff --git a/src/intel/compiler/brw_nir_clamp_per_vertex_loads.c b/src/intel/compiler/brw_nir_clamp_per_vertex_loads.c
index fc0d98a..ba02129 100644
--- a/src/intel/compiler/brw_nir_clamp_per_vertex_loads.c
+++ b/src/intel/compiler/brw_nir_clamp_per_vertex_loads.c
@@ -102,7 +102,7 @@ lower_patch_vertices_instr(nir_builder *b, nir_instr *instr, void *cb_data)

    b->cursor = nir_before_instr(instr);

-   nir_def_rewrite_uses(&intrin->dest.ssa, nir_imm_int(b, *input_vertices));
+   nir_def_rewrite_uses(&intrin->def, nir_imm_int(b, *input_vertices));

    return true;
 }
diff --git a/src/intel/compiler/brw_nir_lower_cs_intrinsics.c b/src/intel/compiler/brw_nir_lower_cs_intrinsics.c
index 1eb447e..fabe168 100644
--- a/src/intel/compiler/brw_nir_lower_cs_intrinsics.c
+++ b/src/intel/compiler/brw_nir_lower_cs_intrinsics.c
@@ -193,10 +193,10 @@ lower_cs_intrinsics_convert_block(struct lower_intrinsics_state *state,
       case nir_intrinsic_load_workgroup_id:
       case nir_intrinsic_load_num_workgroups:
          /* Convert this to 32-bit if it's not */
-         if (intrinsic->dest.ssa.bit_size == 64) {
-            intrinsic->dest.ssa.bit_size = 32;
-            sysval = nir_u2u64(b, &intrinsic->dest.ssa);
-            nir_def_rewrite_uses_after(&intrinsic->dest.ssa,
+         if (intrinsic->def.bit_size == 64) {
+            intrinsic->def.bit_size = 32;
+            sysval = nir_u2u64(b, &intrinsic->def);
+            nir_def_rewrite_uses_after(&intrinsic->def,
                                        sysval,
                                        sysval->parent_instr);
          }
@@ -262,10 +262,10 @@ lower_cs_intrinsics_convert_block(struct lower_intrinsics_state *state,
          continue;
       }

-      if (intrinsic->dest.ssa.bit_size == 64)
+      if (intrinsic->def.bit_size == 64)
          sysval = nir_u2u64(b, sysval);

-      nir_def_rewrite_uses(&intrinsic->dest.ssa, sysval);
+      nir_def_rewrite_uses(&intrinsic->def, sysval);
       nir_instr_remove(&intrinsic->instr);

       state->progress = true;
diff --git a/src/intel/compiler/brw_nir_lower_intersection_shader.c b/src/intel/compiler/brw_nir_lower_intersection_shader.c
index 4bbcf23..d4586aa 100644
--- a/src/intel/compiler/brw_nir_lower_intersection_shader.c
+++ b/src/intel/compiler/brw_nir_lower_intersection_shader.c
@@ -92,13 +92,13 @@ lower_any_hit_for_intersection(nir_shader *any_hit)
                break;

             case nir_intrinsic_load_ray_t_max:
-               nir_def_rewrite_uses(&intrin->dest.ssa,
+               nir_def_rewrite_uses(&intrin->def,
                                     hit_t);
                nir_instr_remove(&intrin->instr);
                break;

             case nir_intrinsic_load_ray_hit_kind:
-               nir_def_rewrite_uses(&intrin->dest.ssa,
+               nir_def_rewrite_uses(&intrin->def,
                                     hit_kind);
                nir_instr_remove(&intrin->instr);
                break;
@@ -214,7 +214,7 @@ brw_nir_lower_intersection_shader(nir_shader *intersection,
          nir_push_if(b, nir_inot(b, nir_load_leaf_opaque_intel(b)));
          {
             nir_def *params[] = {
-               &nir_build_deref_var(b, commit_tmp)->dest.ssa,
+               &nir_build_deref_var(b, commit_tmp)->def,
                hit_t,
                hit_kind,
             };
@@ -236,7 +236,7 @@ brw_nir_lower_intersection_shader(nir_shader *intersection,
          nir_pop_if(b, NULL);

          nir_def *accepted = nir_load_var(b, commit_tmp);
-         nir_def_rewrite_uses(&intrin->dest.ssa,
+         nir_def_rewrite_uses(&intrin->def,
                               accepted);
          break;
       }
diff --git a/src/intel/compiler/brw_nir_lower_non_uniform_resource_intel.c b/src/intel/compiler/brw_nir_lower_non_uniform_resource_intel.c
index c9ab79f..c584b31 100644
--- a/src/intel/compiler/brw_nir_lower_non_uniform_resource_intel.c
+++ b/src/intel/compiler/brw_nir_lower_non_uniform_resource_intel.c
@@ -132,7 +132,7 @@ brw_nir_lower_non_uniform_intrinsic(nir_builder *b,
          nir_instr_as_intrinsic(new_instr);

       nir_src_rewrite(&new_resource_intel->src[1], intrin->src[source].ssa);
-      nir_src_rewrite(&intrin->src[source], &new_resource_intel->dest.ssa);
+      nir_src_rewrite(&intrin->src[source], &new_resource_intel->def);

       return true;
    }
@@ -166,7 +166,7 @@ brw_nir_lower_non_uniform_tex(nir_builder *b,
          nir_instr_as_intrinsic(new_instr);

       nir_src_rewrite(&new_resource_intel->src[1], tex->src[s].src.ssa);
-      nir_src_rewrite(&tex->src[s].src, &new_resource_intel->dest.ssa);
+      nir_src_rewrite(&tex->src[s].src, &new_resource_intel->def);

       progress = true;
    }
@@ -291,7 +291,7 @@ brw_nir_cleanup_resource_intel_instr(nir_builder *b,
       return false;

    bool progress = false;
-   nir_foreach_use_safe(src, &intrin->dest.ssa) {
+   nir_foreach_use_safe(src, &intrin->def) {
       if (!src->is_if && skip_resource_intel_cleanup(src->parent_instr))
          continue;
diff --git a/src/intel/compiler/brw_nir_lower_ray_queries.c b/src/intel/compiler/brw_nir_lower_ray_queries.c
index 5e11e5c..4ca2496 100644
--- a/src/intel/compiler/brw_nir_lower_ray_queries.c
+++ b/src/intel/compiler/brw_nir_lower_ray_queries.c
@@ -324,7 +324,7 @@ lower_ray_query_intrinsic(nir_builder *b,
       }
       nir_pop_if(b, NULL);
       not_done = nir_if_phi(b, not_done_then, not_done_else);
-      nir_def_rewrite_uses(&intrin->dest.ssa, not_done);
+      nir_def_rewrite_uses(&intrin->def, not_done);
       break;
    }
@@ -490,7 +490,7 @@ lower_ray_query_intrinsic(nir_builder *b,
       }

       assert(sysval);
-      nir_def_rewrite_uses(&intrin->dest.ssa, sysval);
+      nir_def_rewrite_uses(&intrin->def, sysval);
       break;
    }
diff --git a/src/intel/compiler/brw_nir_lower_rt_intrinsics.c b/src/intel/compiler/brw_nir_lower_rt_intrinsics.c
index 39ffde7..353e918 100644
--- a/src/intel/compiler/brw_nir_lower_rt_intrinsics.c
+++ b/src/intel/compiler/brw_nir_lower_rt_intrinsics.c
@@ -340,7 +340,7 @@ lower_rt_intrinsics_impl(nir_function_impl *impl,
       progress = true;

       if (sysval) {
-         nir_def_rewrite_uses(&intrin->dest.ssa,
+         nir_def_rewrite_uses(&intrin->def,
                               sysval);
          nir_instr_remove(&intrin->instr);
       }
diff --git a/src/intel/compiler/brw_nir_lower_shading_rate_output.c b/src/intel/compiler/brw_nir_lower_shading_rate_output.c
index f5dbbd7..bca3d37 100644
--- a/src/intel/compiler/brw_nir_lower_shading_rate_output.c
+++ b/src/intel/compiler/brw_nir_lower_shading_rate_output.c
@@ -86,7 +86,7 @@ lower_shading_rate_output_instr(nir_builder *b, nir_instr *instr,
       nir_instr_rewrite_src(instr, &intrin->src[0],
                             nir_src_for_ssa(packed_fp16_xy));
    } else {
-      nir_def *packed_fp16_xy = &intrin->dest.ssa;
+      nir_def *packed_fp16_xy = &intrin->def;

       nir_def *u32_x =
          nir_i2i32(b, nir_unpack_32_2x16_split_x(b, packed_fp16_xy));
diff --git a/src/intel/compiler/brw_nir_lower_sparse.c b/src/intel/compiler/brw_nir_lower_sparse.c
index bf1c239..9c92398 100644
--- a/src/intel/compiler/brw_nir_lower_sparse.c
+++ b/src/intel/compiler/brw_nir_lower_sparse.c
@@ -56,7 +56,7 @@ lower_is_sparse_texels_resident(nir_builder *b, nir_intrinsic_instr *intrin)
    b->cursor = nir_instr_remove(&intrin->instr);

    nir_def_rewrite_uses(
-      &intrin->dest.ssa,
+      &intrin->def,
       nir_i2b(b, nir_iand(b, intrin->src[0].ssa,
                           nir_ishl(b, nir_imm_int(b, 1),
                                    nir_load_subgroup_invocation(b)))));
@@ -68,7 +68,7 @@ lower_sparse_residency_code_and(nir_builder *b, nir_intrinsic_instr *intrin)
    b->cursor = nir_instr_remove(&intrin->instr);

    nir_def_rewrite_uses(
-      &intrin->dest.ssa,
+      &intrin->def,
       nir_iand(b, intrin->src[0].ssa, intrin->src[1].ssa));
 }

@@ -82,7 +82,7 @@ lower_sparse_image_load(nir_builder *b, nir_intrinsic_instr *intrin)
    if (intrin->intrinsic == nir_intrinsic_image_sparse_load) {
       img_load =
          nir_image_load(b, intrin->num_components - 1,
-                        intrin->dest.ssa.bit_size,
+                        intrin->def.bit_size,
                         intrin->src[0].ssa,
                         intrin->src[1].ssa,
                         intrin->src[2].ssa,
@@ -92,7 +92,7 @@ lower_sparse_image_load(nir_builder *b, nir_intrinsic_instr *intrin)
    } else {
       img_load =
          nir_bindless_image_load(b, intrin->num_components - 1,
-                                 intrin->dest.ssa.bit_size,
+                                 intrin->def.bit_size,
                                  intrin->src[0].ssa,
                                  intrin->src[1].ssa,
                                  intrin->src[2].ssa,
@@ -156,15 +156,15 @@ lower_sparse_image_load(nir_builder *b, nir_intrinsic_instr *intrin)
    tex->src[2].src_type = nir_tex_src_lod;
    tex->src[2].src = nir_src_for_ssa(nir_imm_int(b, 0));

-   nir_def_init(&tex->instr, &tex->dest.ssa, 5,
-                intrin->dest.ssa.bit_size);
+   nir_def_init(&tex->instr, &tex->def, 5,
+                intrin->def.bit_size);

    nir_builder_instr_insert(b, &tex->instr);

-   dests[intrin->num_components - 1] = nir_channel(b, &tex->dest.ssa, 4);
+   dests[intrin->num_components - 1] = nir_channel(b, &tex->def, 4);

    nir_def_rewrite_uses(
-      &intrin->dest.ssa,
+      &intrin->def,
       nir_vec(b, dests, intrin->num_components));
 }

@@ -175,8 +175,8 @@ lower_tex_compare(nir_builder *b, nir_tex_instr *tex, int compare_idx)
    /* Clone the original instruction */
    nir_tex_instr *sparse_tex =
       nir_instr_as_tex(nir_instr_clone(b->shader, &tex->instr));
-   nir_def_init(&sparse_tex->instr, &sparse_tex->dest.ssa,
-                tex->dest.ssa.num_components, tex->dest.ssa.bit_size);
+   nir_def_init(&sparse_tex->instr, &sparse_tex->def,
+                tex->def.num_components, tex->def.bit_size);
    nir_builder_instr_insert(b, &sparse_tex->instr);

    /* Drop the compare source on the cloned instruction */
@@ -184,17 +184,17 @@ lower_tex_compare(nir_builder *b, nir_tex_instr *tex, int compare_idx)

    /* Drop the residency query on the original tex instruction */
    tex->is_sparse = false;
-   tex->dest.ssa.num_components = tex->dest.ssa.num_components - 1;
+   tex->def.num_components = tex->def.num_components - 1;

    nir_def *new_comps[NIR_MAX_VEC_COMPONENTS];
-   for (unsigned i = 0; i < tex->dest.ssa.num_components; i++)
-      new_comps[i] = nir_channel(b, &tex->dest.ssa, i);
-   new_comps[tex->dest.ssa.num_components] =
-      nir_channel(b, &sparse_tex->dest.ssa, tex->dest.ssa.num_components);
+   for (unsigned i = 0; i < tex->def.num_components; i++)
+      new_comps[i] = nir_channel(b, &tex->def, i);
+   new_comps[tex->def.num_components] =
+      nir_channel(b, &sparse_tex->def, tex->def.num_components);

-   nir_def *new_vec = nir_vec(b, new_comps, sparse_tex->dest.ssa.num_components);
+   nir_def *new_vec = nir_vec(b, new_comps, sparse_tex->def.num_components);

-   nir_def_rewrite_uses_after(&tex->dest.ssa, new_vec, new_vec->parent_instr);
+   nir_def_rewrite_uses_after(&tex->def, new_vec, new_vec->parent_instr);
 }

 static bool
diff --git a/src/intel/compiler/brw_nir_lower_storage_image.c b/src/intel/compiler/brw_nir_lower_storage_image.c
index ceebfc6..d051be2 100644
--- a/src/intel/compiler/brw_nir_lower_storage_image.c
+++ b/src/intel/compiler/brw_nir_lower_storage_image.c
@@ -33,7 +33,7 @@ _load_image_param(nir_builder *b, nir_deref_instr *deref, unsigned offset)
    nir_intrinsic_instr *load =
       nir_intrinsic_instr_create(b->shader,
                                  nir_intrinsic_image_deref_load_param_intel);
-   load->src[0] = nir_src_for_ssa(&deref->dest.ssa);
+   load->src[0] = nir_src_for_ssa(&deref->def);
    nir_intrinsic_set_base(load, offset / 4);

    switch (offset) {
@@ -51,10 +51,10 @@ _load_image_param(nir_builder *b, nir_deref_instr *deref, unsigned offset)
    default:
       unreachable("Invalid param offset");
    }
-   nir_def_init(&load->instr, &load->dest.ssa, load->num_components, 32);
+   nir_def_init(&load->instr, &load->def, load->num_components, 32);

    nir_builder_instr_insert(b, &load->instr);
-   return &load->dest.ssa;
+   return &load->def;
 }

 #define load_image_param(b, d, o) \
@@ -384,22 +384,22 @@ lower_image_load_instr(nir_builder *b,
        * conversion.
        */
       nir_def *placeholder = nir_undef(b, 4, 32);
-      nir_def_rewrite_uses(&intrin->dest.ssa, placeholder);
+      nir_def_rewrite_uses(&intrin->def, placeholder);

       intrin->num_components = isl_format_get_num_channels(lower_fmt);
-      intrin->dest.ssa.num_components = intrin->num_components;
+      intrin->def.num_components = intrin->num_components;

       b->cursor = nir_after_instr(&intrin->instr);

       nir_def *color = convert_color_for_load(b, devinfo,
-                                              &intrin->dest.ssa,
+                                              &intrin->def,
                                               image_fmt, lower_fmt,
                                               dest_components);

       if (sparse) {
          /* Put the sparse component back on the original instruction */
          intrin->num_components++;
-         intrin->dest.ssa.num_components = intrin->num_components;
+         intrin->def.num_components = intrin->num_components;

          /* Carry over the sparse component without modifying it with the
           * converted color.
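The storage-image hunks above also preserve the placeholder-undef idiom unchanged: park the old uses on an undef, retype the destination in place through the embedded def, then point the parked uses at the converted value. A condensed sketch of that idiom under the same assumptions (the helper name and the widened component count are illustrative, not the exact brw code):

static void
widen_load_in_place(nir_builder *b, nir_intrinsic_instr *intrin)
{
   unsigned old_comps = intrin->def.num_components;

   /* Park all existing uses on an undef so they survive the retype. */
   nir_def *placeholder = nir_undef(b, old_comps, intrin->def.bit_size);
   nir_def_rewrite_uses(&intrin->def, placeholder);

   /* num_components now lives directly on the embedded def. */
   intrin->num_components = 4;
   intrin->def.num_components = 4;

   b->cursor = nir_after_instr(&intrin->instr);

   /* Re-derive the shape the users expect, then repoint the parked
    * uses at it. */
   nir_def *trimmed = nir_channels(b, &intrin->def,
                                   nir_component_mask(old_comps));
   nir_def_rewrite_uses(placeholder, trimmed);
}
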
@@ -408,7 +408,7 @@ lower_image_load_instr(nir_builder *b,
       for (unsigned i = 0; i < dest_components; i++)
          sparse_color[i] = nir_channel(b, color, i);
       sparse_color[dest_components] =
-         nir_channel(b, &intrin->dest.ssa, intrin->num_components - 1);
+         nir_channel(b, &intrin->def, intrin->num_components - 1);
       color = nir_vec(b, sparse_color, dest_components + 1);
    }
@@ -451,7 +451,7 @@ lower_image_load_instr(nir_builder *b,
       nir_def *addr = image_address(b, devinfo, deref, coord);
       nir_def *load =
          nir_image_deref_load_raw_intel(b, image_fmtl->bpb / 32, 32,
-                                        &deref->dest.ssa, addr);
+                                        &deref->def, addr);

       nir_push_else(b, NULL);

@@ -465,7 +465,7 @@ lower_image_load_instr(nir_builder *b,
                                  image_fmt, raw_fmt,
                                  dest_components);

-      nir_def_rewrite_uses(&intrin->dest.ssa, color);
+      nir_def_rewrite_uses(&intrin->def, color);
    }

    return true;
@@ -607,7 +607,7 @@ lower_image_store_instr(nir_builder *b,
       nir_intrinsic_instr *store =
          nir_intrinsic_instr_create(b->shader,
                                     nir_intrinsic_image_deref_store_raw_intel);
-      store->src[0] = nir_src_for_ssa(&deref->dest.ssa);
+      store->src[0] = nir_src_for_ssa(&deref->def);
       store->src[1] = nir_src_for_ssa(addr);
       store->src[2] = nir_src_for_ssa(color);
       store->num_components = image_fmtl->bpb / 32;
@@ -633,7 +633,7 @@ lower_image_atomic_instr(nir_builder *b,

    /* Use an undef to hold the uses of the load conversion. */
    nir_def *placeholder = nir_undef(b, 4, 32);
-   nir_def_rewrite_uses(&intrin->dest.ssa, placeholder);
+   nir_def_rewrite_uses(&intrin->def, placeholder);

    /* Check the first component of the size field to find out if the
     * image is bound.  Necessary on IVB for typed atomics because
@@ -648,7 +648,7 @@ lower_image_atomic_instr(nir_builder *b,

    nir_pop_if(b, NULL);

-   nir_def *result = nir_if_phi(b, &intrin->dest.ssa, zero);
+   nir_def *result = nir_if_phi(b, &intrin->def, zero);
    nir_def_rewrite_uses(placeholder, result);

    return true;
@@ -692,11 +692,11 @@ lower_image_size_instr(nir_builder *b,
    for (unsigned c = 0; c < coord_comps; c++)
       comps[c] = nir_channel(b, size, c);

-   for (unsigned c = coord_comps; c < intrin->dest.ssa.num_components; ++c)
+   for (unsigned c = coord_comps; c < intrin->def.num_components; ++c)
       comps[c] = nir_imm_int(b, 1);

-   nir_def *vec = nir_vec(b, comps, intrin->dest.ssa.num_components);
-   nir_def_rewrite_uses(&intrin->dest.ssa, vec);
+   nir_def *vec = nir_vec(b, comps, intrin->def.num_components);
+   nir_def_rewrite_uses(&intrin->def, vec);

    return true;
 }
diff --git a/src/intel/compiler/brw_nir_rt.c b/src/intel/compiler/brw_nir_rt.c
index 9ddbfa6..2dc8ffe 100644
--- a/src/intel/compiler/brw_nir_rt.c
+++ b/src/intel/compiler/brw_nir_rt.c
@@ -28,12 +28,12 @@ static bool
 resize_deref(nir_builder *b, nir_deref_instr *deref,
              unsigned num_components, unsigned bit_size)
 {
-   if (deref->dest.ssa.num_components == num_components &&
-       deref->dest.ssa.bit_size == bit_size)
+   if (deref->def.num_components == num_components &&
+       deref->def.bit_size == bit_size)
       return false;

    /* NIR requires array indices have to match the deref bit size */
-   if (deref->dest.ssa.bit_size != bit_size &&
+   if (deref->def.bit_size != bit_size &&
        (deref->deref_type == nir_deref_type_array ||
         deref->deref_type == nir_deref_type_ptr_as_array)) {
       b->cursor = nir_before_instr(&deref->instr);
@@ -47,8 +47,8 @@ resize_deref(nir_builder *b, nir_deref_instr *deref,
                             nir_src_for_ssa(idx));
    }

-   deref->dest.ssa.num_components = num_components;
-   deref->dest.ssa.bit_size = bit_size;
+   deref->def.num_components = num_components;
+   deref->def.bit_size = bit_size;

    return true;
 }
@@ -117,8 +117,8 @@ lower_rt_io_derefs(nir_shader *shader)
             nir_build_deref_cast(&b, call_data_addr,
                                  nir_var_function_temp,
                                  deref->var->type, 0);
-         nir_def_rewrite_uses(&deref->dest.ssa,
-                              &cast->dest.ssa);
+         nir_def_rewrite_uses(&deref->def,
+                              &cast->def);
          nir_instr_remove(&deref->instr);
          progress = true;
       }
@@ -130,8 +130,8 @@ lower_rt_io_derefs(nir_shader *shader)
             nir_build_deref_cast(&b, hit_attrib_addr,
                                  nir_var_function_temp,
                                  deref->type, 0);
-         nir_def_rewrite_uses(&deref->dest.ssa,
-                              &cast->dest.ssa);
+         nir_def_rewrite_uses(&deref->def,
+                              &cast->def);
          nir_instr_remove(&deref->instr);
          progress = true;
       }
@@ -520,7 +520,7 @@ brw_nir_create_raygen_trampoline(const struct brw_compiler *compiler,
          b.cursor = nir_before_instr(&intrin->instr);
          nir_def *global_arg_addr =
             load_trampoline_param(&b, rt_disp_globals_addr, 1, 64);
-         nir_def_rewrite_uses(&intrin->dest.ssa,
+         nir_def_rewrite_uses(&intrin->def,
                               global_arg_addr);
          nir_instr_remove(instr);
       }
diff --git a/src/intel/compiler/brw_vec4_gs_nir.cpp b/src/intel/compiler/brw_vec4_gs_nir.cpp
index 8f7a8b6..818688d 100644
--- a/src/intel/compiler/brw_vec4_gs_nir.cpp
+++ b/src/intel/compiler/brw_vec4_gs_nir.cpp
@@ -33,7 +33,7 @@ vec4_gs_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)

    switch (instr->intrinsic) {
    case nir_intrinsic_load_per_vertex_input: {
-      assert(instr->dest.ssa.bit_size == 32);
+      assert(instr->def.bit_size == 32);
       /* The EmitNoIndirectInput flag guarantees our vertex index will
        * be constant.  We should handle indirects someday.
        */
@@ -50,7 +50,7 @@ vec4_gs_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
                      type);
       src.swizzle = BRW_SWZ_COMP_INPUT(nir_intrinsic_component(instr));

-      dest = get_nir_def(instr->dest.ssa, src.type);
+      dest = get_nir_def(instr->def, src.type);
       dest.writemask = brw_writemask_for_size(instr->num_components);
       emit(MOV(dest, src));
       break;
@@ -78,12 +78,12 @@ vec4_gs_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)

    case nir_intrinsic_load_primitive_id:
       assert(gs_prog_data->include_primitive_id);
-      dest = get_nir_def(instr->dest.ssa, BRW_REGISTER_TYPE_D);
+      dest = get_nir_def(instr->def, BRW_REGISTER_TYPE_D);
       emit(MOV(dest, retype(brw_vec4_grf(1, 0), BRW_REGISTER_TYPE_D)));
       break;

    case nir_intrinsic_load_invocation_id: {
-      dest = get_nir_def(instr->dest.ssa, BRW_REGISTER_TYPE_D);
+      dest = get_nir_def(instr->def, BRW_REGISTER_TYPE_D);
       if (gs_prog_data->invocations > 1)
          emit(GS_OPCODE_GET_INSTANCE_ID, dest);
       else
diff --git a/src/intel/compiler/brw_vec4_nir.cpp b/src/intel/compiler/brw_vec4_nir.cpp
index 336c0f8..9eb81bf 100644
--- a/src/intel/compiler/brw_vec4_nir.cpp
+++ b/src/intel/compiler/brw_vec4_nir.cpp
@@ -411,7 +411,7 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
       if (bit_size == 64)
          reg.type = BRW_REGISTER_TYPE_DF;

-      nir_ssa_values[instr->dest.ssa.index] = reg;
+      nir_ssa_values[instr->def.index] = reg;
       break;
    }

@@ -423,11 +423,11 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
       break;

    case nir_intrinsic_load_input: {
-      assert(instr->dest.ssa.bit_size == 32);
+      assert(instr->def.bit_size == 32);
       /* We set EmitNoIndirectInput for VS */
       unsigned load_offset = nir_src_as_uint(instr->src[0]);

-      dest = get_nir_def(instr->dest.ssa);
+      dest = get_nir_def(instr->def);
       src = src_reg(ATTR, nir_intrinsic_base(instr) + load_offset,
                     glsl_type::uvec4_type);
@@ -457,7 +457,7 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
       unsigned ssbo_index = nir_src_is_const(instr->src[0]) ?
                             nir_src_as_uint(instr->src[0]) : 0;

-      dst_reg result_dst = get_nir_def(instr->dest.ssa);
+      dst_reg result_dst = get_nir_def(instr->def);
       vec4_instruction *inst =
          new(mem_ctx) vec4_instruction(SHADER_OPCODE_GET_BUFFER_SIZE, result_dst);

@@ -541,7 +541,7 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
       assert(devinfo->ver == 7);

       /* brw_nir_lower_mem_access_bit_sizes takes care of this */
-      assert(instr->dest.ssa.bit_size == 32);
+      assert(instr->def.bit_size == 32);

       src_reg surf_index = get_nir_ssbo_intrinsic_index(instr);
       src_reg offset_reg = retype(get_nir_src_imm(instr->src[1]),
@@ -554,7 +554,7 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
       src_reg read_result = emit_untyped_read(bld, surf_index, offset_reg,
                                               1 /* dims */, 4 /* size*/,
                                               BRW_PREDICATE_NONE);
-      dst_reg dest = get_nir_def(instr->dest.ssa);
+      dst_reg dest = get_nir_def(instr->def);
       read_result.type = dest.type;
       read_result.swizzle = brw_swizzle_for_size(instr->num_components);
       emit(MOV(dest, read_result));
@@ -581,7 +581,7 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
       /* Offsets are in bytes but they should always be multiples of 4 */
       assert(nir_intrinsic_base(instr) % 4 == 0);

-      dest = get_nir_def(instr->dest.ssa);
+      dest = get_nir_def(instr->def);

       src = src_reg(dst_reg(UNIFORM, nir_intrinsic_base(instr) / 16));
       src.type = dest.type;
@@ -632,7 +632,7 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
    case nir_intrinsic_load_ubo: {
       src_reg surf_index;

-      dest = get_nir_def(instr->dest.ssa);
+      dest = get_nir_def(instr->def);

       if (nir_src_is_const(instr->src[0])) {
          /* The block index is a constant, so just emit the binding table entry
@@ -685,7 +685,7 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
       src_reg packed_consts;
       if (push_reg.file != BAD_FILE) {
          packed_consts = push_reg;
-      } else if (instr->dest.ssa.bit_size == 32) {
+      } else if (instr->def.bit_size == 32) {
          packed_consts = src_reg(this, glsl_type::vec4_type);
          emit_pull_constant_load_reg(dst_reg(packed_consts),
                                      surf_index,
@@ -743,7 +743,7 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
       const src_reg shader_clock = get_timestamp();
       const enum brw_reg_type type = brw_type_for_base_type(glsl_type::uvec2_type);

-      dest = get_nir_def(instr->dest.ssa, type);
+      dest = get_nir_def(instr->def, type);
       emit(MOV(dest, shader_clock));
       break;
    }
@@ -758,7 +758,7 @@ vec4_visitor::nir_emit_ssbo_atomic(int op, nir_intrinsic_instr *instr)
 {
    dst_reg dest;
    if (nir_intrinsic_infos[instr->intrinsic].has_dest)
-      dest = get_nir_def(instr->dest.ssa);
+      dest = get_nir_def(instr->def);

    src_reg surface = get_nir_ssbo_intrinsic_index(instr);
    src_reg offset = get_nir_src(instr->src[1], 1);
@@ -1884,7 +1884,7 @@ vec4_visitor::nir_emit_texture(nir_tex_instr *instr)
    src_reg sample_index;
    src_reg mcs;

-   dst_reg dest = get_nir_def(instr->dest.ssa, instr->dest_type);
+   dst_reg dest = get_nir_def(instr->def, instr->dest_type);

    /* The hardware requires a LOD for buffer textures */
    if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF)
diff --git a/src/intel/compiler/brw_vec4_tcs.cpp b/src/intel/compiler/brw_vec4_tcs.cpp
index aa3fe85..0bc3a08 100644
--- a/src/intel/compiler/brw_vec4_tcs.cpp
+++ b/src/intel/compiler/brw_vec4_tcs.cpp
@@ -241,19 +241,19 @@ vec4_tcs_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
 {
    switch (instr->intrinsic) {
    case nir_intrinsic_load_invocation_id:
-      emit(MOV(get_nir_def(instr->dest.ssa, BRW_REGISTER_TYPE_UD),
+      emit(MOV(get_nir_def(instr->def, BRW_REGISTER_TYPE_UD),
                invocation_id));
       break;
    case nir_intrinsic_load_primitive_id:
       emit(TCS_OPCODE_GET_PRIMITIVE_ID,
-           get_nir_def(instr->dest.ssa, BRW_REGISTER_TYPE_UD));
+           get_nir_def(instr->def, BRW_REGISTER_TYPE_UD));
       break;
    case nir_intrinsic_load_patch_vertices_in:
-      emit(MOV(get_nir_def(instr->dest.ssa, BRW_REGISTER_TYPE_D),
+      emit(MOV(get_nir_def(instr->def, BRW_REGISTER_TYPE_D),
                brw_imm_d(key->input_vertices)));
       break;
    case nir_intrinsic_load_per_vertex_input: {
-      assert(instr->dest.ssa.bit_size == 32);
+      assert(instr->def.bit_size == 32);
       src_reg indirect_offset = get_indirect_offset(instr);
       unsigned imm_offset = nir_intrinsic_base(instr);

@@ -261,7 +261,7 @@ vec4_tcs_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
                                   BRW_REGISTER_TYPE_UD);

       unsigned first_component = nir_intrinsic_component(instr);
-      dst_reg dst = get_nir_def(instr->dest.ssa, BRW_REGISTER_TYPE_D);
+      dst_reg dst = get_nir_def(instr->def, BRW_REGISTER_TYPE_D);
       dst.writemask = brw_writemask_for_size(instr->num_components);
       emit_input_urb_read(dst, vertex_index, imm_offset,
                           first_component, indirect_offset);
@@ -275,7 +275,7 @@ vec4_tcs_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
       src_reg indirect_offset = get_indirect_offset(instr);
       unsigned imm_offset = nir_intrinsic_base(instr);

-      dst_reg dst = get_nir_def(instr->dest.ssa, BRW_REGISTER_TYPE_D);
+      dst_reg dst = get_nir_def(instr->def, BRW_REGISTER_TYPE_D);
       dst.writemask = brw_writemask_for_size(instr->num_components);

       emit_output_urb_read(dst, imm_offset, nir_intrinsic_component(instr),
diff --git a/src/intel/compiler/brw_vec4_tes.cpp b/src/intel/compiler/brw_vec4_tes.cpp
index ef2bea7..2e3fced 100644
--- a/src/intel/compiler/brw_vec4_tes.cpp
+++ b/src/intel/compiler/brw_vec4_tes.cpp
@@ -118,38 +118,38 @@ vec4_tes_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
    switch (instr->intrinsic) {
    case nir_intrinsic_load_tess_coord:
       /* gl_TessCoord is part of the payload in g1 channels 0-2 and 4-6.
*/ - emit(MOV(get_nir_def(instr->dest.ssa, BRW_REGISTER_TYPE_F), + emit(MOV(get_nir_def(instr->def, BRW_REGISTER_TYPE_F), src_reg(brw_vec8_grf(1, 0)))); break; case nir_intrinsic_load_tess_level_outer: if (tes_prog_data->domain == BRW_TESS_DOMAIN_ISOLINE) { - emit(MOV(get_nir_def(instr->dest.ssa, BRW_REGISTER_TYPE_F), + emit(MOV(get_nir_def(instr->def, BRW_REGISTER_TYPE_F), swizzle(src_reg(ATTR, 1, glsl_type::vec4_type), BRW_SWIZZLE_ZWZW))); } else { - emit(MOV(get_nir_def(instr->dest.ssa, BRW_REGISTER_TYPE_F), + emit(MOV(get_nir_def(instr->def, BRW_REGISTER_TYPE_F), swizzle(src_reg(ATTR, 1, glsl_type::vec4_type), BRW_SWIZZLE_WZYX))); } break; case nir_intrinsic_load_tess_level_inner: if (tes_prog_data->domain == BRW_TESS_DOMAIN_QUAD) { - emit(MOV(get_nir_def(instr->dest.ssa, BRW_REGISTER_TYPE_F), + emit(MOV(get_nir_def(instr->def, BRW_REGISTER_TYPE_F), swizzle(src_reg(ATTR, 0, glsl_type::vec4_type), BRW_SWIZZLE_WZYX))); } else { - emit(MOV(get_nir_def(instr->dest.ssa, BRW_REGISTER_TYPE_F), + emit(MOV(get_nir_def(instr->def, BRW_REGISTER_TYPE_F), src_reg(ATTR, 1, glsl_type::float_type))); } break; case nir_intrinsic_load_primitive_id: emit(TES_OPCODE_GET_PRIMITIVE_ID, - get_nir_def(instr->dest.ssa, BRW_REGISTER_TYPE_UD)); + get_nir_def(instr->def, BRW_REGISTER_TYPE_UD)); break; case nir_intrinsic_load_input: case nir_intrinsic_load_per_vertex_input: { - assert(instr->dest.ssa.bit_size == 32); + assert(instr->def.bit_size == 32); src_reg indirect_offset = get_indirect_offset(instr); unsigned imm_offset = instr->const_index[0]; src_reg header = input_read_header; @@ -178,7 +178,7 @@ vec4_tes_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr) src_reg src = src_reg(ATTR, imm_offset, glsl_type::ivec4_type); src.swizzle = BRW_SWZ_COMP_INPUT(first_component); - emit(MOV(get_nir_def(instr->dest.ssa, BRW_REGISTER_TYPE_D), src)); + emit(MOV(get_nir_def(instr->def, BRW_REGISTER_TYPE_D), src)); prog_data->urb_read_length = MAX2(prog_data->urb_read_length, @@ -199,7 +199,7 @@ vec4_tes_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr) /* Copy to target. We might end up with some funky writemasks landing * in here, but we really don't want them in the above pseudo-ops. 
*/ - dst_reg dst = get_nir_def(instr->dest.ssa, BRW_REGISTER_TYPE_D); + dst_reg dst = get_nir_def(instr->def, BRW_REGISTER_TYPE_D); dst.writemask = brw_writemask_for_size(instr->num_components); emit(MOV(dst, src)); break; diff --git a/src/intel/vulkan/anv_internal_kernels.c b/src/intel/vulkan/anv_internal_kernels.c index 92a64db..14f3e38 100644 --- a/src/intel/vulkan/anv_internal_kernels.c +++ b/src/intel/vulkan/anv_internal_kernels.c @@ -93,7 +93,7 @@ lower_vulkan_descriptors_instr(nir_builder *b, nir_instr *instr, void *cb_data) nir_imm_int(b, 0)); } - nir_def_rewrite_uses(&intrin->dest.ssa, desc_value); + nir_def_rewrite_uses(&intrin->def, desc_value); return true; } @@ -121,7 +121,7 @@ lower_base_workgroup_id(nir_builder *b, nir_instr *instr, UNUSED void *data) return false; b->cursor = nir_instr_remove(&intrin->instr); - nir_def_rewrite_uses(&intrin->dest.ssa, nir_imm_zero(b, 3, 32)); + nir_def_rewrite_uses(&intrin->def, nir_imm_zero(b, 3, 32)); return true; } @@ -138,14 +138,14 @@ lower_load_ubo_to_uniforms(nir_builder *b, nir_instr *instr, void *cb_data) b->cursor = nir_instr_remove(instr); nir_def_rewrite_uses( - &intrin->dest.ssa, + &intrin->def, nir_load_uniform(b, - intrin->dest.ssa.num_components, - intrin->dest.ssa.bit_size, + intrin->def.num_components, + intrin->def.bit_size, intrin->src[1].ssa, .base = 0, - .range = intrin->dest.ssa.num_components * - intrin->dest.ssa.bit_size / 8)); + .range = intrin->def.num_components * + intrin->def.bit_size / 8)); return true; } diff --git a/src/intel/vulkan/anv_mesh_perprim_wa.c b/src/intel/vulkan/anv_mesh_perprim_wa.c index 08d43ba..fc4f731 100644 --- a/src/intel/vulkan/anv_mesh_perprim_wa.c +++ b/src/intel/vulkan/anv_mesh_perprim_wa.c @@ -452,7 +452,7 @@ anv_frag_update_derefs_instr(struct nir_builder *b, nir_instr *instr, void *data return false; nir_instr_remove(&deref->instr); - nir_def_rewrite_uses(&deref->dest.ssa, &new_derefs[location]->dest.ssa); + nir_def_rewrite_uses(&deref->def, &new_derefs[location]->def); return true; } diff --git a/src/intel/vulkan/anv_nir_apply_pipeline_layout.c b/src/intel/vulkan/anv_nir_apply_pipeline_layout.c index bbee1d6..929e2ee 100644 --- a/src/intel/vulkan/anv_nir_apply_pipeline_layout.c +++ b/src/intel/vulkan/anv_nir_apply_pipeline_layout.c @@ -1158,7 +1158,7 @@ try_lower_direct_buffer_intrinsic(nir_builder *b, /* 64-bit atomics only support A64 messages so we can't lower them to * the index+offset model. 
*/ - if (is_atomic && intrin->dest.ssa.bit_size == 64 && + if (is_atomic && intrin->def.bit_size == 64 && !state->pdevice->info.has_lsc) return false; @@ -1243,9 +1243,9 @@ lower_load_accel_struct_desc(nir_builder *b, /* Acceleration structure descriptors are always uint64_t */ nir_def *desc = build_load_descriptor_mem(b, desc_addr, 0, 1, 64, state); - assert(load_desc->dest.ssa.bit_size == 64); - assert(load_desc->dest.ssa.num_components == 1); - nir_def_rewrite_uses(&load_desc->dest.ssa, desc); + assert(load_desc->def.bit_size == 64); + assert(load_desc->def.num_components == 1); + nir_def_rewrite_uses(&load_desc->def, desc); nir_instr_remove(&load_desc->instr); return true; @@ -1327,9 +1327,9 @@ lower_res_index_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin, intrin->src[0].ssa, state); - assert(intrin->dest.ssa.bit_size == index->bit_size); - assert(intrin->dest.ssa.num_components == index->num_components); - nir_def_rewrite_uses(&intrin->dest.ssa, index); + assert(intrin->def.bit_size == index->bit_size); + assert(intrin->def.num_components == index->num_components); + nir_def_rewrite_uses(&intrin->def, index); nir_instr_remove(&intrin->instr); return true; @@ -1345,9 +1345,9 @@ lower_res_reindex_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin, build_res_reindex(b, intrin->src[0].ssa, intrin->src[1].ssa); - assert(intrin->dest.ssa.bit_size == index->bit_size); - assert(intrin->dest.ssa.num_components == index->num_components); - nir_def_rewrite_uses(&intrin->dest.ssa, index); + assert(intrin->def.bit_size == index->bit_size); + assert(intrin->def.num_components == index->num_components); + nir_def_rewrite_uses(&intrin->def, index); nir_instr_remove(&intrin->instr); return true; @@ -1367,9 +1367,9 @@ lower_load_vulkan_descriptor(nir_builder *b, nir_intrinsic_instr *intrin, desc_type, intrin->src[0].ssa, addr_format, state); - assert(intrin->dest.ssa.bit_size == desc->bit_size); - assert(intrin->dest.ssa.num_components == desc->num_components); - nir_def_rewrite_uses(&intrin->dest.ssa, desc); + assert(intrin->def.bit_size == desc->bit_size); + assert(intrin->def.num_components == desc->num_components); + nir_def_rewrite_uses(&intrin->def, desc); nir_instr_remove(&intrin->instr); return true; @@ -1412,7 +1412,7 @@ lower_get_ssbo_size(nir_builder *b, nir_intrinsic_instr *intrin, } nir_def *size = nir_channel(b, desc_range, 2); - nir_def_rewrite_uses(&intrin->dest.ssa, size); + nir_def_rewrite_uses(&intrin->def, size); nir_instr_remove(&intrin->instr); return true; @@ -1474,15 +1474,15 @@ lower_image_size_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin, nir_def *image_depth = build_load_storage_3d_image_depth(b, desc_addr, - nir_channel(b, &intrin->dest.ssa, 2), + nir_channel(b, &intrin->def, 2), state); nir_def *comps[4] = {}; - for (unsigned c = 0; c < intrin->dest.ssa.num_components; c++) - comps[c] = c == 2 ? image_depth : nir_channel(b, &intrin->dest.ssa, c); + for (unsigned c = 0; c < intrin->def.num_components; c++) + comps[c] = c == 2 ? 
image_depth : nir_channel(b, &intrin->def, c); - nir_def *vec = nir_vec(b, comps, intrin->dest.ssa.num_components); - nir_def_rewrite_uses_after(&intrin->dest.ssa, vec, vec->parent_instr); + nir_def *vec = nir_vec(b, comps, intrin->def.num_components); + nir_def_rewrite_uses_after(&intrin->def, vec, vec->parent_instr); return true; } @@ -1500,9 +1500,9 @@ lower_load_constant(nir_builder *b, nir_intrinsic_instr *intrin, nir_def *offset = nir_iadd_imm(b, nir_ssa_for_src(b, intrin->src[0], 1), nir_intrinsic_base(intrin)); - unsigned load_size = intrin->dest.ssa.num_components * - intrin->dest.ssa.bit_size / 8; - unsigned load_align = intrin->dest.ssa.bit_size / 8; + unsigned load_size = intrin->def.num_components * + intrin->def.bit_size / 8; + unsigned load_align = intrin->def.bit_size / 8; assert(load_size < b->shader->constant_data_size); unsigned max_offset = b->shader->constant_data_size - load_size; @@ -1517,10 +1517,10 @@ lower_load_constant(nir_builder *b, nir_intrinsic_instr *intrin, nir_def *data = nir_load_global_constant(b, const_data_addr, load_align, - intrin->dest.ssa.num_components, - intrin->dest.ssa.bit_size); + intrin->def.num_components, + intrin->def.bit_size); - nir_def_rewrite_uses(&intrin->dest.ssa, data); + nir_def_rewrite_uses(&intrin->def, data); return true; } @@ -1535,7 +1535,7 @@ lower_base_workgroup_id(nir_builder *b, nir_intrinsic_instr *intrin, nir_load_push_constant(b, 3, 32, nir_imm_int(b, 0), .base = offsetof(struct anv_push_constants, cs.base_work_group_id), .range = sizeof_field(struct anv_push_constants, cs.base_work_group_id)); - nir_def_rewrite_uses(&intrin->dest.ssa, base_workgroup_id); + nir_def_rewrite_uses(&intrin->def, base_workgroup_id); return true; } @@ -1652,7 +1652,7 @@ lower_ray_query_globals(nir_builder *b, nir_intrinsic_instr *intrin, nir_load_push_constant(b, 1, 64, nir_imm_int(b, 0), .base = offsetof(struct anv_push_constants, ray_query_globals), .range = sizeof_field(struct anv_push_constants, ray_query_globals)); - nir_def_rewrite_uses(&intrin->dest.ssa, rq_globals); + nir_def_rewrite_uses(&intrin->def, rq_globals); return true; } diff --git a/src/intel/vulkan/anv_nir_compute_push_layout.c b/src/intel/vulkan/anv_nir_compute_push_layout.c index 80d3918..864a234 100644 --- a/src/intel/vulkan/anv_nir_compute_push_layout.c +++ b/src/intel/vulkan/anv_nir_compute_push_layout.c @@ -184,7 +184,7 @@ anv_nir_compute_push_layout(nir_shader *nir, b, pc_load, nir_load_reloc_const_intel( b, BRW_SHADER_RELOC_DESCRIPTORS_ADDR_HIGH)); - nir_def_rewrite_uses(&intrin->dest.ssa, desc_addr); + nir_def_rewrite_uses(&intrin->def, desc_addr); break; } @@ -197,7 +197,7 @@ anv_nir_compute_push_layout(nir_shader *nir, .dest_type = nir_type_uint32); pc_load = nir_iand_imm( b, pc_load, ANV_DESCRIPTOR_SET_DYNAMIC_INDEX_MASK); - nir_def_rewrite_uses(&intrin->dest.ssa, pc_load); + nir_def_rewrite_uses(&intrin->def, pc_load); break; } diff --git a/src/intel/vulkan/anv_nir_lower_load_patch_vertices_in.c b/src/intel/vulkan/anv_nir_lower_load_patch_vertices_in.c index 18f5c4a..30b4b9b 100644 --- a/src/intel/vulkan/anv_nir_lower_load_patch_vertices_in.c +++ b/src/intel/vulkan/anv_nir_lower_load_patch_vertices_in.c @@ -48,7 +48,7 @@ lower_patch_vertices_in_instr(nir_builder *b, nir_instr *instr, UNUSED void *_da b->cursor = nir_before_instr(instr); nir_def_rewrite_uses( - &load->dest.ssa, + &load->def, nir_load_push_constant( b, 1, 32, nir_imm_int(b, 0), diff --git a/src/intel/vulkan/anv_nir_lower_multiview.c b/src/intel/vulkan/anv_nir_lower_multiview.c index 
1b80752..e858d6d 100644 --- a/src/intel/vulkan/anv_nir_lower_multiview.c +++ b/src/intel/vulkan/anv_nir_lower_multiview.c @@ -244,7 +244,7 @@ anv_nir_lower_multiview(nir_shader *shader, uint32_t view_mask, value = build_view_index(&state); } - nir_def_rewrite_uses(&load->dest.ssa, value); + nir_def_rewrite_uses(&load->def, value); nir_instr_remove(&load->instr); } diff --git a/src/intel/vulkan/anv_nir_lower_ubo_loads.c b/src/intel/vulkan/anv_nir_lower_ubo_loads.c index b221534..a22ad55 100644 --- a/src/intel/vulkan/anv_nir_lower_ubo_loads.c +++ b/src/intel/vulkan/anv_nir_lower_ubo_loads.c @@ -42,7 +42,7 @@ lower_ubo_load_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data) if (load->intrinsic == nir_intrinsic_load_global_constant_bounded) bound = load->src[2].ssa; - unsigned bit_size = load->dest.ssa.bit_size; + unsigned bit_size = load->def.bit_size; assert(bit_size >= 8 && bit_size % 8 == 0); unsigned byte_size = bit_size / 8; @@ -90,8 +90,8 @@ lower_ubo_load_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data) nir_push_if(b, in_bounds); nir_def *load_val = - nir_build_load_global_constant(b, load->dest.ssa.num_components, - load->dest.ssa.bit_size, addr, + nir_build_load_global_constant(b, load->def.num_components, + load->def.bit_size, addr, .access = nir_intrinsic_access(load), .align_mul = nir_intrinsic_align_mul(load), .align_offset = nir_intrinsic_align_offset(load)); @@ -100,15 +100,15 @@ lower_ubo_load_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data) val = nir_if_phi(b, load_val, zero); } else { - val = nir_build_load_global_constant(b, load->dest.ssa.num_components, - load->dest.ssa.bit_size, addr, + val = nir_build_load_global_constant(b, load->def.num_components, + load->def.bit_size, addr, .access = nir_intrinsic_access(load), .align_mul = nir_intrinsic_align_mul(load), .align_offset = nir_intrinsic_align_offset(load)); } } - nir_def_rewrite_uses(&load->dest.ssa, val); + nir_def_rewrite_uses(&load->def, val); nir_instr_remove(&load->instr); return true; diff --git a/src/intel/vulkan/anv_nir_push_descriptor_analysis.c b/src/intel/vulkan/anv_nir_push_descriptor_analysis.c index e58e369..2e1c75d 100644 --- a/src/intel/vulkan/anv_nir_push_descriptor_analysis.c +++ b/src/intel/vulkan/anv_nir_push_descriptor_analysis.c @@ -211,7 +211,7 @@ anv_nir_push_desc_ubo_fully_promoted(nir_shader *nir, /* Check if the load was promoted to a push constant. 
*/ const unsigned load_offset = const_load_offset[0].u32; const int load_bytes = nir_intrinsic_dest_components(intrin) * - (intrin->dest.ssa.bit_size / 8); + (intrin->def.bit_size / 8); for (unsigned i = 0; i < ARRAY_SIZE(bind_map->push_ranges); i++) { if (bind_map->push_ranges[i].set == binding->set && diff --git a/src/intel/vulkan_hasvk/anv_nir_apply_pipeline_layout.c b/src/intel/vulkan_hasvk/anv_nir_apply_pipeline_layout.c index fbe3d62..b3ee704 100644 --- a/src/intel/vulkan_hasvk/anv_nir_apply_pipeline_layout.c +++ b/src/intel/vulkan_hasvk/anv_nir_apply_pipeline_layout.c @@ -703,9 +703,9 @@ lower_load_accel_struct_desc(nir_builder *b, /* Acceleration structure descriptors are always uint64_t */ nir_def *desc = build_load_descriptor_mem(b, desc_addr, 0, 1, 64, state); - assert(load_desc->dest.ssa.bit_size == 64); - assert(load_desc->dest.ssa.num_components == 1); - nir_def_rewrite_uses(&load_desc->dest.ssa, desc); + assert(load_desc->def.bit_size == 64); + assert(load_desc->def.num_components == 1); + nir_def_rewrite_uses(&load_desc->def, desc); nir_instr_remove(&load_desc->instr); return true; @@ -753,9 +753,9 @@ lower_res_index_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin, intrin->src[0].ssa, addr_format, state); - assert(intrin->dest.ssa.bit_size == index->bit_size); - assert(intrin->dest.ssa.num_components == index->num_components); - nir_def_rewrite_uses(&intrin->dest.ssa, index); + assert(intrin->def.bit_size == index->bit_size); + assert(intrin->def.num_components == index->num_components); + nir_def_rewrite_uses(&intrin->def, index); nir_instr_remove(&intrin->instr); return true; @@ -775,9 +775,9 @@ lower_res_reindex_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin, intrin->src[1].ssa, addr_format); - assert(intrin->dest.ssa.bit_size == index->bit_size); - assert(intrin->dest.ssa.num_components == index->num_components); - nir_def_rewrite_uses(&intrin->dest.ssa, index); + assert(intrin->def.bit_size == index->bit_size); + assert(intrin->def.num_components == index->num_components); + nir_def_rewrite_uses(&intrin->def, index); nir_instr_remove(&intrin->instr); return true; @@ -796,9 +796,9 @@ lower_load_vulkan_descriptor(nir_builder *b, nir_intrinsic_instr *intrin, build_buffer_addr_for_res_index(b, desc_type, intrin->src[0].ssa, addr_format, state); - assert(intrin->dest.ssa.bit_size == desc->bit_size); - assert(intrin->dest.ssa.num_components == desc->num_components); - nir_def_rewrite_uses(&intrin->dest.ssa, desc); + assert(intrin->def.bit_size == desc->bit_size); + assert(intrin->def.num_components == desc->num_components); + nir_def_rewrite_uses(&intrin->def, desc); nir_instr_remove(&intrin->instr); return true; @@ -824,7 +824,7 @@ lower_get_ssbo_size(nir_builder *b, nir_intrinsic_instr *intrin, case nir_address_format_64bit_global_32bit_offset: case nir_address_format_64bit_bounded_global: { nir_def *size = nir_channel(b, desc, 2); - nir_def_rewrite_uses(&intrin->dest.ssa, size); + nir_def_rewrite_uses(&intrin->def, size); nir_instr_remove(&intrin->instr); break; } @@ -871,10 +871,10 @@ lower_image_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin, nir_def *desc = build_load_var_deref_descriptor_mem(b, deref, param * 16, - intrin->dest.ssa.num_components, - intrin->dest.ssa.bit_size, state); + intrin->def.num_components, + intrin->def.bit_size, state); - nir_def_rewrite_uses(&intrin->dest.ssa, desc); + nir_def_rewrite_uses(&intrin->def, desc); } else { nir_def *index = NULL; if (deref->deref_type != nir_deref_type_var) { @@ -906,9 +906,9 @@ 
lower_load_constant(nir_builder *b, nir_intrinsic_instr *intrin, nir_def *data; if (!anv_use_relocations(state->pdevice)) { - unsigned load_size = intrin->dest.ssa.num_components * - intrin->dest.ssa.bit_size / 8; - unsigned load_align = intrin->dest.ssa.bit_size / 8; + unsigned load_size = intrin->def.num_components * + intrin->def.bit_size / 8; + unsigned load_align = intrin->def.bit_size / 8; assert(load_size < b->shader->constant_data_size); unsigned max_offset = b->shader->constant_data_size - load_size; @@ -921,20 +921,20 @@ lower_load_constant(nir_builder *b, nir_intrinsic_instr *intrin, data = nir_load_global_constant(b, nir_iadd(b, const_data_base_addr, nir_u2u64(b, offset)), load_align, - intrin->dest.ssa.num_components, - intrin->dest.ssa.bit_size); + intrin->def.num_components, + intrin->def.bit_size); } else { nir_def *index = nir_imm_int(b, state->constants_offset); - data = nir_load_ubo(b, intrin->num_components, intrin->dest.ssa.bit_size, + data = nir_load_ubo(b, intrin->num_components, intrin->def.bit_size, index, offset, - .align_mul = intrin->dest.ssa.bit_size / 8, + .align_mul = intrin->def.bit_size / 8, .align_offset = 0, .range_base = nir_intrinsic_base(intrin), .range = nir_intrinsic_range(intrin)); } - nir_def_rewrite_uses(&intrin->dest.ssa, data); + nir_def_rewrite_uses(&intrin->def, data); return true; } @@ -949,7 +949,7 @@ lower_base_workgroup_id(nir_builder *b, nir_intrinsic_instr *intrin, nir_load_push_constant(b, 3, 32, nir_imm_int(b, 0), .base = offsetof(struct anv_push_constants, cs.base_work_group_id), .range = 3 * sizeof(uint32_t)); - nir_def_rewrite_uses(&intrin->dest.ssa, base_workgroup_id); + nir_def_rewrite_uses(&intrin->def, base_workgroup_id); return true; } @@ -1114,8 +1114,8 @@ lower_gfx7_tex_swizzle(nir_builder *b, nir_tex_instr *tex, unsigned plane, b->cursor = nir_after_instr(&tex->instr); - assert(tex->dest.ssa.bit_size == 32); - assert(tex->dest.ssa.num_components == 4); + assert(tex->def.bit_size == 32); + assert(tex->def.num_components == 4); /* Initializing to undef is ok; nir_opt_undef will clean it up. 
*/ nir_def *undef = nir_undef(b, 1, 32); @@ -1128,10 +1128,10 @@ lower_gfx7_tex_swizzle(nir_builder *b, nir_tex_instr *tex, unsigned plane, comps[ISL_CHANNEL_SELECT_ONE] = nir_imm_float(b, 1); else comps[ISL_CHANNEL_SELECT_ONE] = nir_imm_int(b, 1); - comps[ISL_CHANNEL_SELECT_RED] = nir_channel(b, &tex->dest.ssa, 0); - comps[ISL_CHANNEL_SELECT_GREEN] = nir_channel(b, &tex->dest.ssa, 1); - comps[ISL_CHANNEL_SELECT_BLUE] = nir_channel(b, &tex->dest.ssa, 2); - comps[ISL_CHANNEL_SELECT_ALPHA] = nir_channel(b, &tex->dest.ssa, 3); + comps[ISL_CHANNEL_SELECT_RED] = nir_channel(b, &tex->def, 0); + comps[ISL_CHANNEL_SELECT_GREEN] = nir_channel(b, &tex->def, 1); + comps[ISL_CHANNEL_SELECT_BLUE] = nir_channel(b, &tex->def, 2); + comps[ISL_CHANNEL_SELECT_ALPHA] = nir_channel(b, &tex->def, 3); nir_def *swiz_comps[4]; for (unsigned i = 0; i < 4; i++) { @@ -1141,7 +1141,7 @@ lower_gfx7_tex_swizzle(nir_builder *b, nir_tex_instr *tex, unsigned plane, nir_def *swiz_tex_res = nir_vec(b, swiz_comps, 4); /* Rewrite uses before we insert so we don't rewrite this use */ - nir_def_rewrite_uses_after(&tex->dest.ssa, + nir_def_rewrite_uses_after(&tex->def, swiz_tex_res, swiz_tex_res->parent_instr); } diff --git a/src/intel/vulkan_hasvk/anv_nir_lower_multiview.c b/src/intel/vulkan_hasvk/anv_nir_lower_multiview.c index a409f1e..b66bebe 100644 --- a/src/intel/vulkan_hasvk/anv_nir_lower_multiview.c +++ b/src/intel/vulkan_hasvk/anv_nir_lower_multiview.c @@ -219,7 +219,7 @@ anv_nir_lower_multiview(nir_shader *shader, uint32_t view_mask) value = build_view_index(&state); } - nir_def_rewrite_uses(&load->dest.ssa, value); + nir_def_rewrite_uses(&load->def, value); nir_instr_remove(&load->instr); } diff --git a/src/intel/vulkan_hasvk/anv_nir_lower_ubo_loads.c b/src/intel/vulkan_hasvk/anv_nir_lower_ubo_loads.c index 5d715fb..8eef5ab 100644 --- a/src/intel/vulkan_hasvk/anv_nir_lower_ubo_loads.c +++ b/src/intel/vulkan_hasvk/anv_nir_lower_ubo_loads.c @@ -42,7 +42,7 @@ lower_ubo_load_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data) if (load->intrinsic == nir_intrinsic_load_global_constant_bounded) bound = load->src[2].ssa; - unsigned bit_size = load->dest.ssa.bit_size; + unsigned bit_size = load->def.bit_size; assert(bit_size >= 8 && bit_size % 8 == 0); unsigned byte_size = bit_size / 8; @@ -90,8 +90,8 @@ lower_ubo_load_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data) nir_push_if(b, in_bounds); nir_def *load_val = - nir_build_load_global_constant(b, load->dest.ssa.num_components, - load->dest.ssa.bit_size, addr, + nir_build_load_global_constant(b, load->def.num_components, + load->def.bit_size, addr, .access = nir_intrinsic_access(load), .align_mul = nir_intrinsic_align_mul(load), .align_offset = nir_intrinsic_align_offset(load)); @@ -100,15 +100,15 @@ lower_ubo_load_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data) val = nir_if_phi(b, load_val, zero); } else { - val = nir_build_load_global_constant(b, load->dest.ssa.num_components, - load->dest.ssa.bit_size, addr, + val = nir_build_load_global_constant(b, load->def.num_components, + load->def.bit_size, addr, .access = nir_intrinsic_access(load), .align_mul = nir_intrinsic_align_mul(load), .align_offset = nir_intrinsic_align_offset(load)); } } - nir_def_rewrite_uses(&load->dest.ssa, val); + nir_def_rewrite_uses(&load->def, val); nir_instr_remove(&load->instr); return true; diff --git a/src/intel/vulkan_hasvk/anv_nir_lower_ycbcr_textures.c b/src/intel/vulkan_hasvk/anv_nir_lower_ycbcr_textures.c index 8279888..edec25d 100644 --- 
a/src/intel/vulkan_hasvk/anv_nir_lower_ycbcr_textures.c +++ b/src/intel/vulkan_hasvk/anv_nir_lower_ycbcr_textures.c @@ -53,12 +53,12 @@ get_texture_size(struct ycbcr_state *state, nir_deref_instr *texture) tex->dest_type = nir_type_int32; tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_deref, - &texture->dest.ssa); + &texture->def); - nir_def_init(&tex->instr, &tex->dest.ssa, nir_tex_instr_dest_size(tex), 32); + nir_def_init(&tex->instr, &tex->def, nir_tex_instr_dest_size(tex), 32); nir_builder_instr_insert(b, &tex->instr); - state->image_size = nir_i2f32(b, &tex->dest.ssa); + state->image_size = nir_i2f32(b, &tex->def); return state->image_size; } @@ -151,11 +151,11 @@ create_plane_tex_instr_implicit(struct ycbcr_state *state, tex->sampler_index = old_tex->sampler_index; tex->is_array = old_tex->is_array; - nir_def_init(&tex->instr, &tex->dest.ssa, old_tex->dest.ssa.num_components, - old_tex->dest.ssa.bit_size); + nir_def_init(&tex->instr, &tex->def, old_tex->def.num_components, + old_tex->def.bit_size); nir_builder_instr_insert(b, &tex->instr); - return &tex->dest.ssa; + return &tex->def; } static unsigned @@ -327,7 +327,7 @@ anv_nir_lower_ycbcr_textures_instr(nir_builder *builder, swizzled_bpcs); } - nir_def_rewrite_uses(&tex->dest.ssa, result); + nir_def_rewrite_uses(&tex->def, result); nir_instr_remove(&tex->instr); return true; diff --git a/src/mesa/main/ff_fragment_shader.c b/src/mesa/main/ff_fragment_shader.c index 428e523..2f17758 100644 --- a/src/mesa/main/ff_fragment_shader.c +++ b/src/mesa/main/ff_fragment_shader.c @@ -800,9 +800,9 @@ load_texture(struct texenv_fragment_program *p, GLuint unit) nir_deref_instr *deref = nir_build_deref_var(p->b, var); tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_deref, - &deref->dest.ssa); + &deref->def); tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref, - &deref->dest.ssa); + &deref->def); nir_def *src2 = nir_channels(p->b, texcoord, @@ -819,8 +819,8 @@ load_texture(struct texenv_fragment_program *p, GLuint unit) tex->src[4] = nir_tex_src_for_ssa(nir_tex_src_comparator, src4); } - nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32); - p->src_texture[unit] = &tex->dest.ssa; + nir_def_init(&tex->instr, &tex->def, 4, 32); + p->src_texture[unit] = &tex->def; nir_builder_instr_insert(p->b, &tex->instr); BITSET_SET(p->b->shader->info.textures_used, unit); diff --git a/src/mesa/program/prog_to_nir.c b/src/mesa/program/prog_to_nir.c index 573f52b..5a270eb 100644 --- a/src/mesa/program/prog_to_nir.c +++ b/src/mesa/program/prog_to_nir.c @@ -422,10 +422,10 @@ ptn_tex(struct ptn_compile *c, nir_def **src, unsigned src_number = 0; instr->src[src_number] = nir_tex_src_for_ssa(nir_tex_src_texture_deref, - &deref->dest.ssa); + &deref->def); src_number++; instr->src[src_number] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref, - &deref->dest.ssa); + &deref->def); src_number++; instr->src[src_number] = nir_tex_src_for_ssa(nir_tex_src_coord, @@ -463,10 +463,10 @@ ptn_tex(struct ptn_compile *c, nir_def **src, assert(src_number == num_srcs); - nir_def_init(&instr->instr, &instr->dest.ssa, 4, 32); + nir_def_init(&instr->instr, &instr->def, 4, 32); nir_builder_instr_insert(b, &instr->instr); - return &instr->dest.ssa; + return &instr->def; } static const nir_op op_trans[MAX_OPCODE] = { diff --git a/src/mesa/state_tracker/st_atifs_to_nir.c b/src/mesa/state_tracker/st_atifs_to_nir.c index ff44a50..42eaf7a 100644 --- a/src/mesa/state_tracker/st_atifs_to_nir.c +++ b/src/mesa/state_tracker/st_atifs_to_nir.c @@ -360,16 +360,16 @@ 
compile_setupinst(struct st_translate *t, glsl_get_sampler_dim_coordinate_components(tex->sampler_dim); tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_deref, - &tex_deref->dest.ssa); + &tex_deref->def); tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref, - &tex_deref->dest.ssa); + &tex_deref->def); tex->src[2] = nir_tex_src_for_ssa(nir_tex_src_coord, nir_trim_vector(t->b, coord, tex->coord_components)); - nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32); + nir_def_init(&tex->instr, &tex->def, 4, 32); nir_builder_instr_insert(t->b, &tex->instr); - t->temps[r] = &tex->dest.ssa; + t->temps[r] = &tex->def; } else if (texinst->Opcode == ATI_FRAGMENT_SHADER_PASS_OP) { t->temps[r] = coord; } diff --git a/src/mesa/state_tracker/st_cb_drawpixels.c b/src/mesa/state_tracker/st_cb_drawpixels.c index 796bd57..cb307c6 100644 --- a/src/mesa/state_tracker/st_cb_drawpixels.c +++ b/src/mesa/state_tracker/st_cb_drawpixels.c @@ -126,17 +126,17 @@ sample_via_nir(nir_builder *b, nir_variable *texcoord, tex->coord_components = 2; tex->dest_type = alu_type; tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_deref, - &deref->dest.ssa); + &deref->def); tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref, - &deref->dest.ssa); + &deref->def); tex->src[2] = nir_tex_src_for_ssa(nir_tex_src_coord, nir_trim_vector(b, nir_load_var(b, texcoord), tex->coord_components)); - nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32); + nir_def_init(&tex->instr, &tex->def, 4, 32); nir_builder_instr_insert(b, &tex->instr); - return nir_channel(b, &tex->dest.ssa, 0); + return nir_channel(b, &tex->def, 0); } static void * diff --git a/src/mesa/state_tracker/st_nir_lower_builtin.c b/src/mesa/state_tracker/st_nir_lower_builtin.c index 2a14dd0..817038d 100644 --- a/src/mesa/state_tracker/st_nir_lower_builtin.c +++ b/src/mesa/state_tracker/st_nir_lower_builtin.c @@ -213,7 +213,7 @@ lower_builtin_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data) def = nir_swizzle(b, def, swiz, intrin->num_components); /* and rewrite uses of original instruction: */ - nir_def_rewrite_uses(&intrin->dest.ssa, def); + nir_def_rewrite_uses(&intrin->def, def); /* at this point intrin should be unused. 
We need to remove it * (rather than waiting for DCE pass) to avoid dangling reference diff --git a/src/mesa/state_tracker/st_nir_lower_tex_src_plane.c b/src/mesa/state_tracker/st_nir_lower_tex_src_plane.c index 96e2dca..019fd33 100644 --- a/src/mesa/state_tracker/st_nir_lower_tex_src_plane.c +++ b/src/mesa/state_tracker/st_nir_lower_tex_src_plane.c @@ -145,7 +145,7 @@ lower_tex_src_plane_block(nir_builder *b, lower_tex_src_state *state, nir_block assert(samp); nir_deref_instr *tex_deref_instr = nir_build_deref_var(b, samp); - nir_def *tex_deref = &tex_deref_instr->dest.ssa; + nir_def *tex_deref = &tex_deref_instr->def; nir_instr_rewrite_src(&tex->instr, &tex->src[tex_index].src, diff --git a/src/mesa/state_tracker/st_pbo.c b/src/mesa/state_tracker/st_pbo.c index 648ac7a..a23051b 100644 --- a/src/mesa/state_tracker/st_pbo.c +++ b/src/mesa/state_tracker/st_pbo.c @@ -521,14 +521,14 @@ create_fs(struct st_context *st, bool download, tex->dest_type = nir_get_nir_type_for_glsl_base_type(glsl_get_sampler_result_type(tex_var->type)); tex->src[0].src_type = nir_tex_src_texture_deref; - tex->src[0].src = nir_src_for_ssa(&tex_deref->dest.ssa); + tex->src[0].src = nir_src_for_ssa(&tex_deref->def); tex->src[1].src_type = nir_tex_src_sampler_deref; - tex->src[1].src = nir_src_for_ssa(&tex_deref->dest.ssa); + tex->src[1].src = nir_src_for_ssa(&tex_deref->def); tex->src[2].src_type = nir_tex_src_coord; tex->src[2].src = nir_src_for_ssa(texcoord); - nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32); + nir_def_init(&tex->instr, &tex->def, 4, 32); nir_builder_instr_insert(&b, &tex->instr); - nir_def *result = &tex->dest.ssa; + nir_def *result = &tex->def; if (conversion == ST_PBO_CONVERT_SINT_TO_UINT) result = nir_imax(&b, result, zero); @@ -553,7 +553,7 @@ create_fs(struct st_context *st, bool download, img_var->data.image.format = format; nir_deref_instr *img_deref = nir_build_deref_var(&b, img_var); - nir_image_deref_store(&b, &img_deref->dest.ssa, + nir_image_deref_store(&b, &img_deref->def, nir_vec4(&b, pbo_addr, zero, zero, zero), zero, result, diff --git a/src/mesa/state_tracker/st_pbo_compute.c b/src/mesa/state_tracker/st_pbo_compute.c index 3909ea2..223ad5a 100644 --- a/src/mesa/state_tracker/st_pbo_compute.c +++ b/src/mesa/state_tracker/st_pbo_compute.c @@ -685,13 +685,13 @@ create_conversion_shader(struct st_context *st, enum pipe_texture_target target, txf->src[1] = nir_tex_src_for_ssa(nir_tex_src_lod, nir_imm_int(&b, 0)); txf->src[2].src_type = nir_tex_src_texture_deref; nir_deref_instr *sampler_deref = nir_build_deref_var(&b, sampler); - txf->src[2].src = nir_src_for_ssa(&sampler_deref->dest.ssa); + txf->src[2].src = nir_src_for_ssa(&sampler_deref->def); - nir_def_init(&txf->instr, &txf->dest.ssa, 4, 32); + nir_def_init(&txf->instr, &txf->def, 4, 32); nir_builder_instr_insert(&b, &txf->instr); /* pass the grid offset as the coord to get the zero-indexed buffer offset */ - do_shader_conversion(&b, &txf->dest.ssa, num_components, global_id, &sd); + do_shader_conversion(&b, &txf->def, num_components, global_id, &sd); nir_pop_if(&b, NULL); diff --git a/src/microsoft/clc/clc_compiler.c b/src/microsoft/clc/clc_compiler.c index aa95baa..9cfc2c7 100644 --- a/src/microsoft/clc/clc_compiler.c +++ b/src/microsoft/clc/clc_compiler.c @@ -145,7 +145,7 @@ clc_lower_input_image_deref(nir_builder *b, struct clc_image_lower_context *cont * arbitrary type for it. 
*/ for (int pass = 0; pass < 2; ++pass) { - nir_foreach_use_safe(src, &context->deref->dest.ssa) { + nir_foreach_use_safe(src, &context->deref->def) { enum image_type type; if (src->parent_instr->type == nir_instr_type_intrinsic) { @@ -218,7 +218,7 @@ clc_lower_input_image_deref(nir_builder *b, struct clc_image_lower_context *cont } /* No actual intrinsic needed here, just reference the loaded variable */ - nir_def_rewrite_uses(&intrinsic->dest.ssa, *cached_deref); + nir_def_rewrite_uses(&intrinsic->def, *cached_deref); nir_instr_remove(&intrinsic->instr); break; } @@ -307,12 +307,12 @@ clc_lower_64bit_semantics(nir_shader *nir) if (nir_instr_ssa_def(instr)->bit_size != 64) continue; - intrinsic->dest.ssa.bit_size = 32; + intrinsic->def.bit_size = 32; b.cursor = nir_after_instr(instr); - nir_def *i64 = nir_u2u64(&b, &intrinsic->dest.ssa); + nir_def *i64 = nir_u2u64(&b, &intrinsic->def); nir_def_rewrite_uses_after( - &intrinsic->dest.ssa, + &intrinsic->def, i64, i64->parent_instr); } diff --git a/src/microsoft/clc/clc_nir.c b/src/microsoft/clc/clc_nir.c index c69d94b..9dbbf4c 100644 --- a/src/microsoft/clc/clc_nir.c +++ b/src/microsoft/clc/clc_nir.c @@ -35,14 +35,14 @@ static nir_def * load_ubo(nir_builder *b, nir_intrinsic_instr *intr, nir_variable *var, unsigned offset) { return nir_load_ubo(b, - intr->dest.ssa.num_components, - intr->dest.ssa.bit_size, + intr->def.num_components, + intr->def.bit_size, nir_imm_int(b, var->data.binding), nir_imm_int(b, offset), .align_mul = 256, .align_offset = offset, .range_base = offset, - .range = intr->dest.ssa.bit_size * intr->dest.ssa.num_components / 8); + .range = intr->def.bit_size * intr->def.num_components / 8); } static bool @@ -53,7 +53,7 @@ lower_load_base_global_invocation_id(nir_builder *b, nir_intrinsic_instr *intr, nir_def *offset = load_ubo(b, intr, var, offsetof(struct clc_work_properties_data, global_offset_x)); - nir_def_rewrite_uses(&intr->dest.ssa, offset); + nir_def_rewrite_uses(&intr->def, offset); nir_instr_remove(&intr->instr); return true; } @@ -66,7 +66,7 @@ lower_load_work_dim(nir_builder *b, nir_intrinsic_instr *intr, nir_def *dim = load_ubo(b, intr, var, offsetof(struct clc_work_properties_data, work_dim)); - nir_def_rewrite_uses(&intr->dest.ssa, dim); + nir_def_rewrite_uses(&intr->def, dim); nir_instr_remove(&intr->instr); return true; } @@ -80,7 +80,7 @@ lower_load_num_workgroups(nir_builder *b, nir_intrinsic_instr *intr, nir_def *count = load_ubo(b, intr, var, offsetof(struct clc_work_properties_data, group_count_total_x)); - nir_def_rewrite_uses(&intr->dest.ssa, count); + nir_def_rewrite_uses(&intr->def, count); nir_instr_remove(&intr->instr); return true; } @@ -94,7 +94,7 @@ lower_load_base_workgroup_id(nir_builder *b, nir_intrinsic_instr *intr, nir_def *offset = load_ubo(b, intr, var, offsetof(struct clc_work_properties_data, group_id_offset_x)); - nir_def_rewrite_uses(&intr->dest.ssa, offset); + nir_def_rewrite_uses(&intr->def, offset); nir_instr_remove(&intr->instr); return true; } @@ -146,7 +146,7 @@ lower_load_kernel_input(nir_builder *b, nir_intrinsic_instr *intr, { b->cursor = nir_before_instr(&intr->instr); - unsigned bit_size = intr->dest.ssa.bit_size; + unsigned bit_size = intr->def.bit_size; enum glsl_base_type base_type; switch (bit_size) { @@ -167,7 +167,7 @@ lower_load_kernel_input(nir_builder *b, nir_intrinsic_instr *intr, } const struct glsl_type *type = - glsl_vector_type(base_type, intr->dest.ssa.num_components); + glsl_vector_type(base_type, intr->def.num_components); nir_def *ptr = nir_vec2(b, 
nir_imm_int(b, var->data.binding), nir_u2uN(b, intr->src[0].ssa, 32)); nir_deref_instr *deref = nir_build_deref_cast(b, ptr, nir_var_mem_ubo, type, @@ -177,7 +177,7 @@ lower_load_kernel_input(nir_builder *b, nir_intrinsic_instr *intr, nir_def *result = nir_load_deref(b, deref); - nir_def_rewrite_uses(&intr->dest.ssa, result); + nir_def_rewrite_uses(&intr->def, result); nir_instr_remove(&intr->instr); return true; } @@ -245,9 +245,9 @@ clc_lower_printf_base(nir_shader *nir, unsigned uav_id) if (!printf_var) { printf_var = add_printf_var(nir, uav_id); nir_deref_instr *deref = nir_build_deref_var(&b, printf_var); - printf_deref = &deref->dest.ssa; + printf_deref = &deref->def; } - nir_def_rewrite_uses(&intrin->dest.ssa, printf_deref); + nir_def_rewrite_uses(&intrin->def, printf_deref); progress = true; } } diff --git a/src/microsoft/compiler/dxil_nir.c b/src/microsoft/compiler/dxil_nir.c index 1c36355..8eb57de 100644 --- a/src/microsoft/compiler/dxil_nir.c +++ b/src/microsoft/compiler/dxil_nir.c @@ -70,8 +70,8 @@ load_comps_to_vec(nir_builder *b, unsigned src_bit_size, static bool lower_32b_offset_load(nir_builder *b, nir_intrinsic_instr *intr, nir_variable *var) { - unsigned bit_size = intr->dest.ssa.bit_size; - unsigned num_components = intr->dest.ssa.num_components; + unsigned bit_size = intr->def.bit_size; + unsigned num_components = intr->def.num_components; unsigned num_bits = num_components * bit_size; b->cursor = nir_before_instr(&intr->instr); @@ -115,7 +115,7 @@ lower_32b_offset_load(nir_builder *b, nir_intrinsic_instr *intr, nir_variable *v } nir_def *result = nir_vec(b, comps, num_components); - nir_def_rewrite_uses(&intr->dest.ssa, result); + nir_def_rewrite_uses(&intr->def, result); nir_instr_remove(&intr->instr); return true; @@ -139,8 +139,8 @@ lower_masked_store_vec32(nir_builder *b, nir_def *offset, nir_def *index, if (var->data.mode == nir_var_mem_shared) { /* Use the dedicated masked intrinsic */ nir_deref_instr *deref = nir_build_deref_array(b, nir_build_deref_var(b, var), index); - nir_deref_atomic(b, 32, &deref->dest.ssa, nir_inot(b, mask), .atomic_op = nir_atomic_op_iand); - nir_deref_atomic(b, 32, &deref->dest.ssa, vec32, .atomic_op = nir_atomic_op_ior); + nir_deref_atomic(b, 32, &deref->def, nir_inot(b, mask), .atomic_op = nir_atomic_op_iand); + nir_deref_atomic(b, 32, &deref->def, vec32, .atomic_op = nir_atomic_op_ior); } else { /* For scratch, since we don't need atomics, just generate the read-modify-write in NIR */ nir_def *load = nir_load_array_var(b, var, index); @@ -269,7 +269,7 @@ dxil_nir_lower_constant_to_temp(nir_shader *nir) if (parent && parent->var->data.mode != nir_var_mem_constant) { deref->modes = parent->var->data.mode; /* Also change "pointer" size to 32-bit since this is now a logical pointer */ - deref->dest.ssa.bit_size = 32; + deref->def.bit_size = 32; if (deref->deref_type == nir_deref_type_array) { b.cursor = nir_before_instr(instr); nir_src_rewrite(&deref->arr.index, nir_u2u32(&b, deref->arr.index.ssa)); @@ -343,7 +343,7 @@ flatten_var_arrays(nir_builder *b, nir_instr *instr, void *data) nir_deref_instr *comp_deref = nir_build_deref_array(b, new_var_deref, final_index); components[i] = nir_load_deref(b, comp_deref); } - nir_def_rewrite_uses(&intr->dest.ssa, nir_vec(b, components, vector_comps)); + nir_def_rewrite_uses(&intr->def, nir_vec(b, components, vector_comps)); } else if (intr->intrinsic == nir_intrinsic_store_deref) { for (unsigned i = 0; i < vector_comps; ++i) { if (((1 << i) & nir_intrinsic_write_mask(intr)) == 0) @@ -355,7 +355,7 
@@ flatten_var_arrays(nir_builder *b, nir_instr *instr, void *data) } nir_instr_remove(instr); } else { - nir_src_rewrite(&intr->src[0], &nir_build_deref_array(b, new_var_deref, index)->dest.ssa); + nir_src_rewrite(&intr->src[0], &nir_build_deref_array(b, new_var_deref, index)->def); } nir_deref_path_finish(&path); @@ -460,10 +460,10 @@ lower_deref_bit_size(nir_builder *b, nir_instr *instr, void *data) if (glsl_get_bit_size(old_glsl_type) < glsl_get_bit_size(var_scalar_type)) { deref->type = var_scalar_type; if (intr->intrinsic == nir_intrinsic_load_deref) { - intr->dest.ssa.bit_size = glsl_get_bit_size(var_scalar_type); + intr->def.bit_size = glsl_get_bit_size(var_scalar_type); b->cursor = nir_after_instr(instr); - nir_def *downcast = nir_type_convert(b, &intr->dest.ssa, new_type, old_type, nir_rounding_mode_undef); - nir_def_rewrite_uses_after(&intr->dest.ssa, downcast, downcast->parent_instr); + nir_def *downcast = nir_type_convert(b, &intr->def, new_type, old_type, nir_rounding_mode_undef); + nir_def_rewrite_uses_after(&intr->def, downcast, downcast->parent_instr); } else { b->cursor = nir_before_instr(instr); @@ -490,7 +490,7 @@ lower_deref_bit_size(nir_builder *b, nir_instr *instr, void *data) if (intr->intrinsic == nir_intrinsic_load_deref) { nir_def *src1 = nir_load_deref(b, deref); nir_def *src2 = nir_load_deref(b, deref2); - nir_def_rewrite_uses(&intr->dest.ssa, nir_pack_64_2x32_split(b, src1, src2)); + nir_def_rewrite_uses(&intr->def, nir_pack_64_2x32_split(b, src1, src2)); } else { nir_def *src1 = nir_unpack_64_2x32_split_x(b, intr->src[1].ssa); nir_def *src2 = nir_unpack_64_2x32_split_y(b, intr->src[1].ssa); @@ -626,13 +626,13 @@ lower_shared_atomic(nir_builder *b, nir_intrinsic_instr *intr, nir_variable *var nir_deref_instr *deref = nir_build_deref_array(b, nir_build_deref_var(b, var), index); nir_def *result; if (intr->intrinsic == nir_intrinsic_shared_atomic_swap) - result = nir_deref_atomic_swap(b, 32, &deref->dest.ssa, intr->src[1].ssa, intr->src[2].ssa, + result = nir_deref_atomic_swap(b, 32, &deref->def, intr->src[1].ssa, intr->src[2].ssa, .atomic_op = nir_intrinsic_atomic_op(intr)); else - result = nir_deref_atomic(b, 32, &deref->dest.ssa, intr->src[1].ssa, + result = nir_deref_atomic(b, 32, &deref->def, intr->src[1].ssa, .atomic_op = nir_intrinsic_atomic_op(intr)); - nir_def_rewrite_uses(&intr->dest.ssa, result); + nir_def_rewrite_uses(&intr->def, result); nir_instr_remove(&intr->instr); return true; } @@ -717,8 +717,8 @@ lower_deref_ssbo(nir_builder *b, nir_deref_instr *deref) nir_deref_instr *deref_cast = nir_build_deref_cast(b, ptr, nir_var_mem_ssbo, deref->type, glsl_get_explicit_stride(var->type)); - nir_def_rewrite_uses(&deref->dest.ssa, - &deref_cast->dest.ssa); + nir_def_rewrite_uses(&deref->def, + &deref_cast->def); nir_instr_remove(&deref->instr); deref = deref_cast; @@ -822,7 +822,7 @@ cast_phi(nir_builder *b, nir_phi_instr *phi, unsigned new_bit_size) { nir_phi_instr *lowered = nir_phi_instr_create(b->shader); int num_components = 0; - int old_bit_size = phi->dest.ssa.bit_size; + int old_bit_size = phi->def.bit_size; nir_foreach_phi_src(src, phi) { assert(num_components == 0 || num_components == src->src.ssa->num_components); @@ -835,16 +835,16 @@ cast_phi(nir_builder *b, nir_phi_instr *phi, unsigned new_bit_size) nir_phi_instr_add_src(lowered, src->pred, nir_src_for_ssa(cast)); } - nir_def_init(&lowered->instr, &lowered->dest.ssa, num_components, + nir_def_init(&lowered->instr, &lowered->def, num_components, new_bit_size); b->cursor = 
nir_before_instr(&phi->instr); nir_builder_instr_insert(b, &lowered->instr); b->cursor = nir_after_phis(nir_cursor_current_block(b->cursor)); - nir_def *result = nir_u2uN(b, &lowered->dest.ssa, old_bit_size); + nir_def *result = nir_u2uN(b, &lowered->def, old_bit_size); - nir_def_rewrite_uses(&phi->dest.ssa, result); + nir_def_rewrite_uses(&phi->def, result); nir_instr_remove(&phi->instr); } @@ -856,8 +856,8 @@ upcast_phi_impl(nir_function_impl *impl, unsigned min_bit_size) nir_foreach_block_reverse(block, impl) { nir_foreach_phi_safe(phi, block) { - if (phi->dest.ssa.bit_size == 1 || - phi->dest.ssa.bit_size >= min_bit_size) + if (phi->def.bit_size == 1 || + phi->def.bit_size >= min_bit_size) continue; cast_phi(&b, phi, min_bit_size); @@ -1002,7 +1002,7 @@ dxil_nir_split_clip_cull_distance_instr(nir_builder *b, new_intermediate_deref = nir_build_deref_array(b, new_intermediate_deref, parent->arr.index.ssa); } nir_deref_instr *new_array_deref = nir_build_deref_array(b, new_intermediate_deref, nir_imm_int(b, total_index % 4)); - nir_def_rewrite_uses(&deref->dest.ssa, &new_array_deref->dest.ssa); + nir_def_rewrite_uses(&deref->def, &new_array_deref->def); return true; } @@ -1157,7 +1157,7 @@ lower_load_local_group_size(nir_builder *b, nir_intrinsic_instr *intr) nir_const_value_for_int(b->shader->info.workgroup_size[2], 32) }; nir_def *size = nir_build_imm(b, 3, 32, v); - nir_def_rewrite_uses(&intr->dest.ssa, size); + nir_def_rewrite_uses(&intr->def, size); nir_instr_remove(&intr->instr); } @@ -1289,7 +1289,7 @@ redirect_sampler_derefs(struct nir_builder *b, nir_instr *instr, void *data) } nir_deref_path_finish(&path); - nir_instr_rewrite_src_ssa(&tex->instr, &tex->src[sampler_idx].src, &new_tail->dest.ssa); + nir_instr_rewrite_src_ssa(&tex->instr, &tex->src[sampler_idx].src, &new_tail->def); return true; } @@ -1368,7 +1368,7 @@ redirect_texture_derefs(struct nir_builder *b, nir_instr *instr, void *data) } nir_deref_path_finish(&path); - nir_instr_rewrite_src_ssa(&tex->instr, &tex->src[texture_idx].src, &new_tail->dest.ssa); + nir_instr_rewrite_src_ssa(&tex->instr, &tex->src[texture_idx].src, &new_tail->def); return true; } @@ -1420,10 +1420,10 @@ lower_sysval_to_load_input_impl(nir_builder *b, nir_instr *instr, void *data) const nir_alu_type dest_type = (sysval == SYSTEM_VALUE_FRONT_FACE) ? nir_type_uint32 : nir_get_nir_type_for_glsl_type(var->type); const unsigned bit_size = (sysval == SYSTEM_VALUE_FRONT_FACE) - ? 32 : intr->dest.ssa.bit_size; + ? 32 : intr->def.bit_size; b->cursor = nir_before_instr(instr); - nir_def *result = nir_load_input(b, intr->dest.ssa.num_components, bit_size, nir_imm_int(b, 0), + nir_def *result = nir_load_input(b, intr->def.num_components, bit_size, nir_imm_int(b, 0), .base = var->data.driver_location, .dest_type = dest_type); /* The nir_type_uint32 is really a nir_type_bool32, but that type is very @@ -1433,7 +1433,7 @@ lower_sysval_to_load_input_impl(nir_builder *b, nir_instr *instr, void *data) if (sysval == SYSTEM_VALUE_FRONT_FACE) result = nir_ine_imm(b, result, 0); - nir_def_rewrite_uses(&intr->dest.ssa, result); + nir_def_rewrite_uses(&intr->def, result); return true; } @@ -1609,7 +1609,7 @@ lower_ubo_array_one_to_static(struct nir_builder *b, nir_instr *inst, // Indexing out of bounds on array of UBOs is considered undefined // behavior. Therefore, we just hardcode all the index to 0. 
- uint8_t bit_size = index->dest.ssa.bit_size; + uint8_t bit_size = index->def.bit_size; nir_def *zero = nir_imm_intN_t(b, 0, bit_size); nir_def *dest = nir_vulkan_resource_index(b, index->num_components, bit_size, zero, @@ -1617,7 +1617,7 @@ lower_ubo_array_one_to_static(struct nir_builder *b, nir_instr *inst, .binding = nir_intrinsic_binding(index), .desc_type = nir_intrinsic_desc_type(index)); - nir_def_rewrite_uses(&index->dest.ssa, dest); + nir_def_rewrite_uses(&index->def, dest); return true; } @@ -1921,7 +1921,7 @@ lower_subgroup_id(nir_builder *b, nir_instr *instr, void *data) /* When using Nx1x1 groups, use a simple stable algorithm * which is almost guaranteed to be correct. */ nir_def *subgroup_id = nir_udiv(b, nir_load_local_invocation_index(b), nir_load_subgroup_size(b)); - nir_def_rewrite_uses(&intr->dest.ssa, subgroup_id); + nir_def_rewrite_uses(&intr->def, subgroup_id); return true; } @@ -1944,7 +1944,7 @@ lower_subgroup_id(nir_builder *b, nir_instr *instr, void *data) .memory_modes = nir_var_mem_shared); nif = nir_push_if(b, nir_elect(b, 1)); - nir_def *subgroup_id_first_thread = nir_deref_atomic(b, 32, &counter_deref->dest.ssa, nir_imm_int(b, 1), + nir_def *subgroup_id_first_thread = nir_deref_atomic(b, 32, &counter_deref->def, nir_imm_int(b, 1), .atomic_op = nir_atomic_op_iadd); nir_store_var(b, subgroup_id_local, subgroup_id_first_thread, 1); nir_pop_if(b, nif); @@ -1952,7 +1952,7 @@ lower_subgroup_id(nir_builder *b, nir_instr *instr, void *data) nir_def *subgroup_id_loaded = nir_load_var(b, subgroup_id_local); *subgroup_id = nir_read_first_invocation(b, subgroup_id_loaded); } - nir_def_rewrite_uses(&intr->dest.ssa, *subgroup_id); + nir_def_rewrite_uses(&intr->def, *subgroup_id); return true; } @@ -1980,7 +1980,7 @@ lower_num_subgroups(nir_builder *b, nir_instr *instr, void *data) nir_imul(b, nir_channel(b, workgroup_size_vec, 1), nir_channel(b, workgroup_size_vec, 2))); nir_def *ret = nir_idiv(b, nir_iadd(b, workgroup_size, size_minus_one), subgroup_size); - nir_def_rewrite_uses(&intr->dest.ssa, ret); + nir_def_rewrite_uses(&intr->def, ret); return true; } @@ -2015,24 +2015,24 @@ split_unaligned_load(nir_builder *b, nir_intrinsic_instr *intrin, unsigned align { enum gl_access_qualifier access = nir_intrinsic_access(intrin); nir_def *srcs[NIR_MAX_VEC_COMPONENTS * NIR_MAX_VEC_COMPONENTS * sizeof(int64_t) / 8]; - unsigned comp_size = intrin->dest.ssa.bit_size / 8; - unsigned num_comps = intrin->dest.ssa.num_components; + unsigned comp_size = intrin->def.bit_size / 8; + unsigned num_comps = intrin->def.num_components; b->cursor = nir_before_instr(&intrin->instr); nir_deref_instr *ptr = nir_src_as_deref(intrin->src[0]); const struct glsl_type *cast_type = get_cast_type(alignment * 8); - nir_deref_instr *cast = nir_build_deref_cast(b, &ptr->dest.ssa, ptr->modes, cast_type, alignment); + nir_deref_instr *cast = nir_build_deref_cast(b, &ptr->def, ptr->modes, cast_type, alignment); unsigned num_loads = DIV_ROUND_UP(comp_size * num_comps, alignment); for (unsigned i = 0; i < num_loads; ++i) { - nir_deref_instr *elem = nir_build_deref_ptr_as_array(b, cast, nir_imm_intN_t(b, i, cast->dest.ssa.bit_size)); + nir_deref_instr *elem = nir_build_deref_ptr_as_array(b, cast, nir_imm_intN_t(b, i, cast->def.bit_size)); srcs[i] = nir_load_deref_with_access(b, elem, access); } - nir_def *new_dest = nir_extract_bits(b, srcs, num_loads, 0, num_comps, intrin->dest.ssa.bit_size); - nir_def_rewrite_uses(&intrin->dest.ssa, new_dest); + nir_def *new_dest = nir_extract_bits(b, srcs, num_loads, 0, 
num_comps, intrin->def.bit_size); + nir_def_rewrite_uses(&intrin->def, new_dest); nir_instr_remove(&intrin->instr); } @@ -2050,12 +2050,12 @@ split_unaligned_store(nir_builder *b, nir_intrinsic_instr *intrin, unsigned alig nir_deref_instr *ptr = nir_src_as_deref(intrin->src[0]); const struct glsl_type *cast_type = get_cast_type(alignment * 8); - nir_deref_instr *cast = nir_build_deref_cast(b, &ptr->dest.ssa, ptr->modes, cast_type, alignment); + nir_deref_instr *cast = nir_build_deref_cast(b, &ptr->def, ptr->modes, cast_type, alignment); unsigned num_stores = DIV_ROUND_UP(comp_size * num_comps, alignment); for (unsigned i = 0; i < num_stores; ++i) { nir_def *substore_val = nir_extract_bits(b, &value, 1, i * alignment * 8, 1, alignment * 8); - nir_deref_instr *elem = nir_build_deref_ptr_as_array(b, cast, nir_imm_intN_t(b, i, cast->dest.ssa.bit_size)); + nir_deref_instr *elem = nir_build_deref_ptr_as_array(b, cast, nir_imm_intN_t(b, i, cast->def.bit_size)); nir_store_deref_with_access(b, elem, substore_val, ~0, access); } @@ -2096,7 +2096,7 @@ dxil_nir_split_unaligned_loads_stores(nir_shader *shader, nir_variable_mode mode nir_def *val; if (intrin->intrinsic == nir_intrinsic_load_deref) { - val = &intrin->dest.ssa; + val = &intrin->def; } else { val = intrin->src[1].ssa; } @@ -2134,8 +2134,8 @@ lower_inclusive_to_exclusive(nir_builder *b, nir_intrinsic_instr *intr) nir_intrinsic_set_reduction_op(intr, op); nir_def *final_val = nir_build_alu2(b, nir_intrinsic_reduction_op(intr), - &intr->dest.ssa, intr->src[0].ssa); - nir_def_rewrite_uses_after(&intr->dest.ssa, final_val, final_val->parent_instr); + &intr->def, intr->src[0].ssa); + nir_def_rewrite_uses_after(&intr->def, final_val, final_val->parent_instr); } static bool @@ -2169,7 +2169,7 @@ lower_subgroup_scan(nir_builder *b, nir_instr *instr, void *data) nir_def *subgroup_id = nir_load_subgroup_invocation(b); nir_def *active_threads = nir_ballot(b, 4, 32, nir_imm_true(b)); nir_def *base_value; - uint32_t bit_size = intr->dest.ssa.bit_size; + uint32_t bit_size = intr->def.bit_size; if (op == nir_op_iand || op == nir_op_umin) base_value = nir_imm_intN_t(b, ~0ull, bit_size); else if (op == nir_op_imin) @@ -2208,7 +2208,7 @@ lower_subgroup_scan(nir_builder *b, nir_instr *instr, void *data) nir_pop_loop(b, loop); result = nir_load_var(b, result_var); - nir_def_rewrite_uses(&intr->dest.ssa, result); + nir_def_rewrite_uses(&intr->def, result); return true; } @@ -2239,7 +2239,7 @@ lower_load_face(nir_builder *b, nir_instr *instr, void *data) nir_variable *var = data; nir_def *load = nir_ine_imm(b, nir_load_var(b, var), 0); - nir_def_rewrite_uses(&intr->dest.ssa, load); + nir_def_rewrite_uses(&intr->def, load); nir_instr_remove(instr); return true; } diff --git a/src/microsoft/compiler/dxil_nir_lower_int_cubemaps.c b/src/microsoft/compiler/dxil_nir_lower_int_cubemaps.c index 4074240..b59a648 100644 --- a/src/microsoft/compiler/dxil_nir_lower_int_cubemaps.c +++ b/src/microsoft/compiler/dxil_nir_lower_int_cubemaps.c @@ -188,10 +188,10 @@ create_array_tex_from_cube_tex(nir_builder *b, nir_tex_instr *tex, nir_def *coor array_tex->src[i].src_type = tex->src[i].src_type; } - nir_def_init(&array_tex->instr, &array_tex->dest.ssa, + nir_def_init(&array_tex->instr, &array_tex->def, nir_tex_instr_dest_size(array_tex), 32); nir_builder_instr_insert(b, &array_tex->instr); - return &array_tex->dest.ssa; + return &array_tex->def; } static nir_def * @@ -426,12 +426,12 @@ lower_cube_txs(nir_builder *b, nir_tex_instr *tex) { b->cursor = nir_after_instr(&tex->instr); 
@@ -426,12 +426,12 @@ lower_cube_txs(nir_builder *b, nir_tex_instr *tex)
 {
   b->cursor = nir_after_instr(&tex->instr);

   if (!tex->is_array)
-      return nir_trim_vector(b, &tex->dest.ssa, 2);
+      return nir_trim_vector(b, &tex->def, 2);

-   nir_def *array_dim = nir_channel(b, &tex->dest.ssa, 2);
+   nir_def *array_dim = nir_channel(b, &tex->def, 2);
   nir_def *cube_array_dim = nir_idiv(b, array_dim, nir_imm_int(b, 6));
-   return nir_vec3(b, nir_channel(b, &tex->dest.ssa, 0),
-                   nir_channel(b, &tex->dest.ssa, 1),
+   return nir_vec3(b, nir_channel(b, &tex->def, 0),
+                   nir_channel(b, &tex->def, 1),
                   cube_array_dim);
 }
@@ -440,12 +440,12 @@ lower_cube_image_size(nir_builder *b, nir_intrinsic_instr *intr)
 {
   b->cursor = nir_after_instr(&intr->instr);

   if (!nir_intrinsic_image_array(intr))
-      return nir_trim_vector(b, &intr->dest.ssa, 2);
+      return nir_trim_vector(b, &intr->def, 2);

-   nir_def *array_dim = nir_channel(b, &intr->dest.ssa, 2);
+   nir_def *array_dim = nir_channel(b, &intr->def, 2);
   nir_def *cube_array_dim = nir_idiv(b, array_dim, nir_imm_int(b, 6));
-   return nir_vec3(b, nir_channel(b, &intr->dest.ssa, 0),
-                   nir_channel(b, &intr->dest.ssa, 1),
+   return nir_vec3(b, nir_channel(b, &intr->def, 0),
+                   nir_channel(b, &intr->def, 1),
                   cube_array_dim);
 }
diff --git a/src/microsoft/compiler/dxil_nir_lower_int_samplers.c b/src/microsoft/compiler/dxil_nir_lower_int_samplers.c
index ea21cee..885f6da 100644
--- a/src/microsoft/compiler/dxil_nir_lower_int_samplers.c
+++ b/src/microsoft/compiler/dxil_nir_lower_int_samplers.c
@@ -105,11 +105,11 @@ dx_get_texture_lod(nir_builder *b, nir_tex_instr *tex)
      }
   }

-   nir_def_init(&tql->instr, &tql->dest.ssa, 2, 32);
+   nir_def_init(&tql->instr, &tql->def, 2, 32);
   nir_builder_instr_insert(b, &tql->instr);

   /* DirectX LOD only has a value in x channel */
-   return nir_channel(b, &tql->dest.ssa, 0);
+   return nir_channel(b, &tql->def, 0);
 }

 typedef struct {
@@ -224,7 +224,7 @@ static nir_def *
 load_bordercolor(nir_builder *b, nir_tex_instr *tex, const dxil_wrap_sampler_state *active_state,
                 const dxil_texture_swizzle_state *tex_swizzle)
 {
-   int ndest_comp = tex->dest.ssa.num_components;
+   int ndest_comp = tex->def.num_components;

   unsigned swizzle[4] = {
      tex_swizzle->swizzle_r,
@@ -296,7 +296,7 @@ create_txf_from_tex(nir_builder *b, nir_tex_instr *tex)
      }
   }

-   nir_def_init(&txf->instr, &txf->dest.ssa, nir_tex_instr_dest_size(txf), 32);
+   nir_def_init(&txf->instr, &txf->def, nir_tex_instr_dest_size(txf), 32);
   nir_builder_instr_insert(b, &txf->instr);

   return txf;
@@ -328,7 +328,7 @@ load_texel(nir_builder *b, nir_tex_instr *tex, wrap_lower_param_t *params)
   nir_tex_instr_add_src(load, nir_tex_src_lod, nir_src_for_ssa(params->lod));
   nir_tex_instr_add_src(load, nir_tex_src_coord, nir_src_for_ssa(texcoord));
   b->cursor = nir_after_instr(&load->instr);
-   return &load->dest.ssa;
+   return &load->def;
 }

 typedef struct {
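load_bordercolor above sizes its result from the destination; with nir_dest gone, the component count and bit size are read straight off tex->def. A small illustrative sketch (the all-zero border colour is an assumption, not this file's logic):

   nir_const_value consts[4];
   int ndest_comp = tex->def.num_components;
   for (int i = 0; i < ndest_comp; i++)
      consts[i] = nir_const_value_for_uint(0, tex->def.bit_size);
   nir_def *border = nir_build_imm(b, ndest_comp, tex->def.bit_size, consts);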
diff --git a/src/microsoft/compiler/dxil_nir_lower_vs_vertex_conversion.c b/src/microsoft/compiler/dxil_nir_lower_vs_vertex_conversion.c
index 0e55d9b..40c81f8 100644
--- a/src/microsoft/compiler/dxil_nir_lower_vs_vertex_conversion.c
+++ b/src/microsoft/compiler/dxil_nir_lower_vs_vertex_conversion.c
@@ -111,11 +111,11 @@ lower_vs_vertex_conversion_impl(nir_builder *b, nir_instr *instr, void *options)
             fmt == PIPE_FORMAT_R8G8B8_UINT ||
             fmt == PIPE_FORMAT_R16G16B16_SINT ||
             fmt == PIPE_FORMAT_R16G16B16_UINT);
-      if (intr->dest.ssa.num_components == 3)
+      if (intr->def.num_components == 3)
         return NULL;
-      return nir_vector_insert_imm(b, &intr->dest.ssa, nir_imm_int(b, 1), 3);
+      return nir_vector_insert_imm(b, &intr->def, nir_imm_int(b, 1), 3);
   } else {
-      nir_def *src = nir_channel(b, &intr->dest.ssa, 0);
+      nir_def *src = nir_channel(b, &intr->def, 0);

      switch (fmt) {
      case PIPE_FORMAT_R10G10B10A2_SNORM:
@@ -134,10 +134,10 @@ lower_vs_vertex_conversion_impl(nir_builder *b, nir_instr *instr, void *options)
         return from_10_10_10_2_scaled(b, src, lshift_bgra(b), nir_ushr);
      case PIPE_FORMAT_R8G8B8A8_USCALED:
      case PIPE_FORMAT_R16G16B16A16_USCALED:
-         return nir_u2f32(b, &intr->dest.ssa);
+         return nir_u2f32(b, &intr->def);
      case PIPE_FORMAT_R8G8B8A8_SSCALED:
      case PIPE_FORMAT_R16G16B16A16_SSCALED:
-         return nir_i2f32(b, &intr->dest.ssa);
+         return nir_i2f32(b, &intr->def);

      default:
         unreachable("Unsupported emulated vertex format");
diff --git a/src/microsoft/compiler/dxil_nir_tess.c b/src/microsoft/compiler/dxil_nir_tess.c
index b9d4625..a3a635a 100644
--- a/src/microsoft/compiler/dxil_nir_tess.c
+++ b/src/microsoft/compiler/dxil_nir_tess.c
@@ -223,10 +223,10 @@ dxil_nir_split_tess_ctrl(nir_shader *nir, nir_function **patch_const_func)
            continue;
         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         if (intr->intrinsic != nir_intrinsic_load_invocation_id ||
-             list_is_empty(&intr->dest.ssa.uses) ||
-             list_is_singular(&intr->dest.ssa.uses))
+             list_is_empty(&intr->def.uses) ||
+             list_is_singular(&intr->def.uses))
            continue;
-         nir_foreach_use_including_if_safe(src, &intr->dest.ssa) {
+         nir_foreach_use_including_if_safe(src, &intr->def) {
            b.cursor = nir_before_src(src);
            nir_src_rewrite(src, nir_load_invocation_id(&b));
         }
@@ -254,7 +254,7 @@ dxil_nir_split_tess_ctrl(nir_shader *nir, nir_function **patch_const_func)
              b.cursor = state.begin_cursor = get_cursor_for_instr_without_cf(instr);
              start_tcs_loop(&b, &state, loop_var_deref);
           }
-           nir_def_rewrite_uses(&intr->dest.ssa, state.count);
+           nir_def_rewrite_uses(&intr->def, state.count);
           break;
        }
        case nir_intrinsic_barrier:
@@ -312,8 +312,8 @@ remove_tess_level_accesses(nir_builder *b, nir_instr *instr, void *_data)
      nir_instr_remove(instr);
   } else {
      b->cursor = nir_after_instr(instr);
-      assert(intr->dest.ssa.num_components == 1);
-      nir_def_rewrite_uses(&intr->dest.ssa, nir_undef(b, 1, intr->dest.ssa.bit_size));
+      assert(intr->def.num_components == 1);
+      nir_def_rewrite_uses(&intr->def, nir_undef(b, 1, intr->def.bit_size));
   }
   return true;
 }
diff --git a/src/microsoft/compiler/nir_to_dxil.c b/src/microsoft/compiler/nir_to_dxil.c
index 2c1ae59..b5c5791 100644
--- a/src/microsoft/compiler/nir_to_dxil.c
+++ b/src/microsoft/compiler/nir_to_dxil.c
@@ -2503,10 +2503,10 @@ static enum overload_type
 get_ambiguous_overload(struct ntd_context *ctx, nir_intrinsic_instr *intr,
                        enum overload_type default_type)
 {
-   if (BITSET_TEST(ctx->int_types, intr->dest.ssa.index))
-      return get_overload(nir_type_int, intr->dest.ssa.bit_size);
-   if (BITSET_TEST(ctx->float_types, intr->dest.ssa.index))
-      return get_overload(nir_type_float, intr->dest.ssa.bit_size);
+   if (BITSET_TEST(ctx->int_types, intr->def.index))
+      return get_overload(nir_type_int, intr->def.bit_size);
+   if (BITSET_TEST(ctx->float_types, intr->def.index))
+      return get_overload(nir_type_float, intr->def.bit_size);
   return default_type;
 }
@@ -2514,7 +2514,7 @@ static enum overload_type
 get_ambiguous_overload_alu_type(struct ntd_context *ctx, nir_intrinsic_instr *intr,
                                 nir_alu_type alu_type)
 {
-   return get_ambiguous_overload(ctx, intr, get_overload(alu_type, intr->dest.ssa.bit_size));
+   return get_ambiguous_overload(ctx, intr, get_overload(alu_type, intr->def.bit_size));
 }

 static bool
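get_ambiguous_overload keys its int/float bitsets on the def's SSA index, which used to be spelled dest.ssa.index. That bookkeeping, sketched with this file's own helpers:

   /* Record that the intrinsic's result is consumed as float, keyed by
    * its SSA index. */
   BITSET_SET(ctx->float_types, intr->def.index);

   /* Later, choose the DXIL overload from the recorded type plus the
    * def's bit size. */
   if (BITSET_TEST(ctx->float_types, intr->def.index))
      overload = get_overload(nir_type_float, intr->def.bit_size);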
@@ -3116,7 +3116,7 @@ static bool
 emit_load_global_invocation_id(struct ntd_context *ctx,
                                nir_intrinsic_instr *intr)
 {
-   nir_component_mask_t comps = nir_def_components_read(&intr->dest.ssa);
+   nir_component_mask_t comps = nir_def_components_read(&intr->def);

   for (int i = 0; i < nir_intrinsic_dest_components(intr); i++) {
      if (comps & (1 << i)) {
@@ -3128,7 +3128,7 @@ emit_load_global_invocation_id(struct ntd_context *ctx,
         if (!globalid)
            return false;

-         store_def(ctx, &intr->dest.ssa, i, globalid);
+         store_def(ctx, &intr->def, i, globalid);
      }
   }
   return true;
@@ -3138,7 +3138,7 @@ static bool
 emit_load_local_invocation_id(struct ntd_context *ctx,
                               nir_intrinsic_instr *intr)
 {
-   nir_component_mask_t comps = nir_def_components_read(&intr->dest.ssa);
+   nir_component_mask_t comps = nir_def_components_read(&intr->def);

   for (int i = 0; i < nir_intrinsic_dest_components(intr); i++) {
      if (comps & (1 << i)) {
@@ -3150,7 +3150,7 @@ emit_load_local_invocation_id(struct ntd_context *ctx,
            *threadidingroup = emit_threadidingroup_call(ctx, idx);
         if (!threadidingroup)
            return false;
-         store_def(ctx, &intr->dest.ssa, i, threadidingroup);
+         store_def(ctx, &intr->def, i, threadidingroup);
      }
   }
   return true;
@@ -3164,7 +3164,7 @@ emit_load_local_invocation_index(struct ntd_context *ctx,
      *flattenedthreadidingroup = emit_flattenedthreadidingroup_call(ctx);
   if (!flattenedthreadidingroup)
      return false;
-   store_def(ctx, &intr->dest.ssa, 0, flattenedthreadidingroup);
+   store_def(ctx, &intr->def, 0, flattenedthreadidingroup);

   return true;
 }
@@ -3173,7 +3173,7 @@ static bool
 emit_load_local_workgroup_id(struct ntd_context *ctx,
                              nir_intrinsic_instr *intr)
 {
-   nir_component_mask_t comps = nir_def_components_read(&intr->dest.ssa);
+   nir_component_mask_t comps = nir_def_components_read(&intr->def);

   for (int i = 0; i < nir_intrinsic_dest_components(intr); i++) {
      if (comps & (1 << i)) {
@@ -3183,7 +3183,7 @@ emit_load_local_workgroup_id(struct ntd_context *ctx,
         const struct dxil_value *groupid = emit_groupid_call(ctx, idx);
         if (!groupid)
            return false;
-         store_def(ctx, &intr->dest.ssa, i, groupid);
+         store_def(ctx, &intr->def, i, groupid);
      }
   }
   return true;
@@ -3217,8 +3217,8 @@ emit_load_unary_external_function(struct ntd_context *ctx,
                                   nir_alu_type type)
 {
   const struct dxil_value *value = call_unary_external_function(ctx, name, dxil_intr,
-                                                                get_overload(type, intr->dest.ssa.bit_size));
-   store_def(ctx, &intr->dest.ssa, 0, value);
+                                                                get_overload(type, intr->def.bit_size));
+   store_def(ctx, &intr->def, 0, value);

   return true;
 }
@@ -3237,7 +3237,7 @@ emit_load_sample_mask_in(struct ntd_context *ctx, nir_intrinsic_instr *intr)
         call_unary_external_function(ctx, "dx.op.sampleIndex", DXIL_INTR_SAMPLE_INDEX, DXIL_I32), 0), 0);
   }

-   store_def(ctx, &intr->dest.ssa, 0, value);
+   store_def(ctx, &intr->def, 0, value);
   return true;
 }
@@ -3267,12 +3267,12 @@ emit_load_tess_coord(struct ntd_context *ctx,
      const struct dxil_value *value =
         dxil_emit_call(&ctx->mod, func, args, ARRAY_SIZE(args));
-      store_def(ctx, &intr->dest.ssa, i, value);
+      store_def(ctx, &intr->def, i, value);
   }

-   for (unsigned i = num_coords; i < intr->dest.ssa.num_components; ++i) {
+   for (unsigned i = num_coords; i < intr->def.num_components; ++i) {
      const struct dxil_value *value = dxil_module_get_float_const(&ctx->mod, 0.0f);
-      store_def(ctx, &intr->dest.ssa, i, value);
+      store_def(ctx, &intr->def, i, value);
   }

   return true;
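The system-value emitters above share one idiom: ask which components of the def are actually read, then emit one scalar external call per live component. Roughly, with store_def and the call helpers as defined in this file:

   nir_component_mask_t comps = nir_def_components_read(&intr->def);

   for (int i = 0; i < nir_intrinsic_dest_components(intr); i++) {
      if (!(comps & (1 << i)))
         continue; /* dead component: skip the external call entirely */

      const struct dxil_value *idx = dxil_module_get_int32_const(&ctx->mod, i);
      const struct dxil_value *v = emit_threadidingroup_call(ctx, idx);
      store_def(ctx, &intr->def, i, v);
   }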
@@ -3441,7 +3441,7 @@ emit_load_ssbo(struct ntd_context *ctx, nir_intrinsic_instr *intr)
      emit_raw_bufferload_call(ctx, handle, coord, overload,
                               nir_intrinsic_dest_components(intr),
-                               intr->dest.ssa.bit_size / 8) :
+                               intr->def.bit_size / 8) :
      emit_bufferload_call(ctx, handle, coord, overload);
   if (!load)
      return false;
@@ -3451,9 +3451,9 @@ emit_load_ssbo(struct ntd_context *ctx, nir_intrinsic_instr *intr)
         dxil_emit_extractval(&ctx->mod, load, i);
      if (!val)
         return false;
-      store_def(ctx, &intr->dest.ssa, i, val);
+      store_def(ctx, &intr->def, i, val);
   }
-   if (intr->dest.ssa.bit_size == 16)
+   if (intr->def.bit_size == 16)
      ctx->mod.feats.native_low_precision = true;
   return true;
 }
@@ -3527,11 +3527,11 @@ emit_load_ubo_vec4(struct ntd_context *ctx, nir_intrinsic_instr *intr)
   unsigned first_component = nir_intrinsic_has_component(intr) ?
      nir_intrinsic_component(intr) : 0;

-   for (unsigned i = 0; i < intr->dest.ssa.num_components; i++)
-      store_def(ctx, &intr->dest.ssa, i,
+   for (unsigned i = 0; i < intr->def.num_components; i++)
+      store_def(ctx, &intr->def, i,
                dxil_emit_extractval(&ctx->mod, agg, i + first_component));

-   if (intr->dest.ssa.bit_size == 16)
+   if (intr->def.bit_size == 16)
      ctx->mod.feats.native_low_precision = true;
   return true;
 }
@@ -3724,7 +3724,7 @@ emit_load_input_via_intrinsic(struct ntd_context *ctx, nir_intrinsic_instr *intr
      row = get_src(ctx, &intr->src[row_index], 0, nir_type_int);

   nir_alu_type out_type = nir_intrinsic_dest_type(intr);
-   enum overload_type overload = get_overload(out_type, intr->dest.ssa.bit_size);
+   enum overload_type overload = get_overload(out_type, intr->def.bit_size);

   const struct dxil_func *func = dxil_get_function(&ctx->mod, func_name, overload);
@@ -3741,7 +3741,7 @@ emit_load_input_via_intrinsic(struct ntd_context *ctx, nir_intrinsic_instr *intr
      struct dxil_signature_record *sig_rec = is_patch_constant ?
         &ctx->mod.patch_consts[nir_intrinsic_base(intr)] :
         &ctx->mod.inputs[ctx->mod.input_mappings[nir_intrinsic_base(intr)]];
-      unsigned comp_size = intr->dest.ssa.bit_size == 64 ? 2 : 1;
+      unsigned comp_size = intr->def.bit_size == 64 ? 2 : 1;
      unsigned comp_mask = (1 << (intr->num_components * comp_size)) - 1;
      comp_mask <<= (var_base_component * comp_size);
      if (is_tess_level)
@@ -3774,7 +3774,7 @@ emit_load_input_via_intrinsic(struct ntd_context *ctx, nir_intrinsic_instr *intr
      const struct dxil_value *retval = dxil_emit_call(&ctx->mod, func, args, num_args);
      if (!retval)
         return false;
-      store_def(ctx, &intr->dest.ssa, i, retval);
+      store_def(ctx, &intr->def, i, retval);
   }
   return true;
 }
@@ -3839,7 +3839,7 @@ emit_load_interpolated_input(struct ntd_context *ctx, nir_intrinsic_instr *intr)
   if (ctx->mod.minor_validator >= 5) {
      struct dxil_signature_record *sig_rec =
         &ctx->mod.inputs[ctx->mod.input_mappings[nir_intrinsic_base(intr)]];
-      unsigned comp_size = intr->dest.ssa.bit_size == 64 ? 2 : 1;
+      unsigned comp_size = intr->def.bit_size == 64 ? 2 : 1;
      unsigned comp_mask = (1 << (intr->num_components * comp_size)) - 1;
      comp_mask <<= (var_base_component * comp_size);
      for (unsigned r = 0; r < sig_rec->num_elements; ++r)
@@ -3858,7 +3858,7 @@ emit_load_interpolated_input(struct ntd_context *ctx, nir_intrinsic_instr *intr)
      const struct dxil_value *retval = dxil_emit_call(&ctx->mod, func, args, num_args);
      if (!retval)
         return false;
-      store_def(ctx, &intr->dest.ssa, i, retval);
+      store_def(ctx, &intr->def, i, retval);
   }
   return true;
 }
@@ -3887,7 +3887,7 @@ deref_to_gep(struct ntd_context *ctx, nir_deref_instr *deref)
   gep_indices[0] = var_array[var->data.driver_location];

   for (uint32_t i = 0; i < count; ++i)
-      gep_indices[i + 1] = get_src_ssa(ctx, &path.path[i]->dest.ssa, 0);
+      gep_indices[i + 1] = get_src_ssa(ctx, &path.path[i]->def, 0);

   return dxil_emit_gep_inbounds(&ctx->mod, gep_indices, count + 1);
 }
@@ -3900,11 +3900,11 @@ emit_load_deref(struct ntd_context *ctx, nir_intrinsic_instr *intr)
      return false;

   const struct dxil_value *retval =
-      dxil_emit_load(&ctx->mod, ptr, intr->dest.ssa.bit_size / 8, false);
+      dxil_emit_load(&ctx->mod, ptr, intr->def.bit_size / 8, false);
   if (!retval)
      return false;

-   store_def(ctx, &intr->dest.ssa, 0, retval);
+   store_def(ctx, &intr->def, 0, retval);
   return true;
 }
@@ -3938,7 +3938,7 @@ emit_atomic_deref(struct ntd_context *ctx, nir_intrinsic_instr *intr)
   if (!retval)
      return false;

-   store_def(ctx, &intr->dest.ssa, 0, retval);
+   store_def(ctx, &intr->def, 0, retval);
   return true;
 }
@@ -3960,7 +3960,7 @@ emit_atomic_deref_swap(struct ntd_context *ctx, nir_intrinsic_instr *intr)
   if (!retval)
      return false;

-   store_def(ctx, &intr->dest.ssa, 0, retval);
+   store_def(ctx, &intr->def, 0, retval);
   return true;
 }
@@ -4149,14 +4149,14 @@ emit_image_load(struct ntd_context *ctx, nir_intrinsic_instr *intr)
   if (!load_result)
      return false;

-   assert(intr->dest.ssa.bit_size == 32);
-   unsigned num_components = intr->dest.ssa.num_components;
+   assert(intr->def.bit_size == 32);
+   unsigned num_components = intr->def.num_components;
   assert(num_components <= 4);
   for (unsigned i = 0; i < num_components; ++i) {
      const struct dxil_value *component = dxil_emit_extractval(&ctx->mod, load_result, i);
      if (!component)
         return false;
-      store_def(ctx, &intr->dest.ssa, i, component);
+      store_def(ctx, &intr->def, i, component);
   }

   if (util_format_get_nr_components(nir_intrinsic_format(intr)) > 1)
@@ -4212,7 +4212,7 @@ emit_image_atomic(struct ntd_context *ctx, nir_intrinsic_instr *intr)
   if (!retval)
      return false;

-   store_def(ctx, &intr->dest.ssa, 0, retval);
+   store_def(ctx, &intr->def, 0, retval);
   return true;
 }
@@ -4261,7 +4261,7 @@ emit_image_atomic_comp_swap(struct ntd_context *ctx, nir_intrinsic_instr *intr)
   if (!retval)
      return false;

-   store_def(ctx, &intr->dest.ssa, 0, retval);
+   store_def(ctx, &intr->def, 0, retval);
   return true;
 }
@@ -4316,9 +4316,9 @@ emit_image_size(struct ntd_context *ctx, nir_intrinsic_instr *intr)
   if (!dimensions)
      return false;

-   for (unsigned i = 0; i < intr->dest.ssa.num_components; ++i) {
+   for (unsigned i = 0; i < intr->def.num_components; ++i) {
      const struct dxil_value *retval = dxil_emit_extractval(&ctx->mod, dimensions, i);
-      store_def(ctx, &intr->dest.ssa, i, retval);
+      store_def(ctx, &intr->def, i, retval);
   }

   return true;
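deref_to_gep flattens a deref chain into GEP indices, taking each path element's SSA value, now its ->def. A compressed sketch using the NIR deref-path helpers (get_src_ssa is this file's lookup):

   nir_deref_path path;
   nir_deref_path_init(&path, deref, NULL);

   /* path.path is a NULL-terminated array of derefs from the variable
    * down; each one contributes its def as one GEP index. */
   for (uint32_t i = 0; path.path[i]; ++i)
      gep_indices[i + 1] = get_src_ssa(ctx, &path.path[i]->def, 0);

   nir_deref_path_finish(&path);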
@@ -4349,7 +4349,7 @@ emit_get_ssbo_size(struct ntd_context *ctx, nir_intrinsic_instr *intr)
      return false;

   const struct dxil_value *retval = dxil_emit_extractval(&ctx->mod, dimensions, 0);
-   store_def(ctx, &intr->dest.ssa, 0, retval);
+   store_def(ctx, &intr->def, 0, retval);

   return true;
 }
@@ -4383,7 +4383,7 @@ emit_ssbo_atomic(struct ntd_context *ctx, nir_intrinsic_instr *intr)
   if (!retval)
      return false;

-   store_def(ctx, &intr->dest.ssa, 0, retval);
+   store_def(ctx, &intr->def, 0, retval);
   return true;
 }
@@ -4415,7 +4415,7 @@ emit_ssbo_atomic_comp_swap(struct ntd_context *ctx, nir_intrinsic_instr *intr)
   if (!retval)
      return false;

-   store_def(ctx, &intr->dest.ssa, 0, retval);
+   store_def(ctx, &intr->def, 0, retval);
   return true;
 }
@@ -4443,8 +4443,8 @@ emit_vulkan_resource_index(struct ntd_context *ctx, nir_intrinsic_instr *intr)
      return false;
   }

-   store_def(ctx, &intr->dest.ssa, 0, index_value);
-   store_def(ctx, &intr->dest.ssa, 1, dxil_module_get_int32_const(&ctx->mod, 0));
+   store_def(ctx, &intr->def, 0, index_value);
+   store_def(ctx, &intr->def, 1, dxil_module_get_int32_const(&ctx->mod, 0));

   return true;
 }
@@ -4498,8 +4498,8 @@ emit_load_vulkan_descriptor(struct ntd_context *ctx, nir_intrinsic_instr *intr)
      handle = emit_annotate_handle(ctx, unannotated_handle, res_props);
   }

-   store_ssa_def(ctx, &intr->dest.ssa, 0, handle);
-   store_def(ctx, &intr->dest.ssa, 1, get_src(ctx, &intr->src[0], 1, nir_type_uint32));
+   store_ssa_def(ctx, &intr->def, 0, handle);
+   store_def(ctx, &intr->def, 1, get_src(ctx, &intr->src[0], 1, nir_type_uint32));

   return true;
 }
@@ -4531,7 +4531,7 @@ emit_load_sample_pos_from_id(struct ntd_context *ctx, nir_intrinsic_instr *intr)
      const struct dxil_value *coord = dxil_emit_binop(&ctx->mod, DXIL_BINOP_ADD,
         dxil_emit_extractval(&ctx->mod, v, i),
         dxil_module_get_float_const(&ctx->mod, 0.5f), 0);
-      store_def(ctx, &intr->dest.ssa, i, coord);
+      store_def(ctx, &intr->def, i, coord);
   }
   return true;
 }
@@ -4546,7 +4546,7 @@ emit_load_sample_id(struct ntd_context *ctx, nir_intrinsic_instr *intr)
      return emit_load_unary_external_function(ctx, intr, "dx.op.sampleIndex",
                                               DXIL_INTR_SAMPLE_INDEX, nir_type_int);

-   store_def(ctx, &intr->dest.ssa, 0, dxil_module_get_int32_const(&ctx->mod, 0));
+   store_def(ctx, &intr->def, 0, dxil_module_get_int32_const(&ctx->mod, 0));
   return true;
 }
@@ -4555,7 +4555,7 @@ emit_read_first_invocation(struct ntd_context *ctx, nir_intrinsic_instr *intr)
 {
   ctx->mod.feats.wave_ops = 1;
   const struct dxil_func *func = dxil_get_function(&ctx->mod, "dx.op.waveReadLaneFirst",
-                                                    get_overload(nir_type_uint, intr->dest.ssa.bit_size));
+                                                    get_overload(nir_type_uint, intr->def.bit_size));
   const struct dxil_value *args[] = {
      dxil_module_get_int32_const(&ctx->mod, DXIL_INTR_WAVE_READ_LANE_FIRST),
      get_src(ctx, intr->src, 0, nir_type_uint),
@@ -4566,7 +4566,7 @@ emit_read_first_invocation(struct ntd_context *ctx, nir_intrinsic_instr *intr)
   const struct dxil_value *ret = dxil_emit_call(&ctx->mod, func, args, ARRAY_SIZE(args));
   if (!ret)
      return false;
-   store_def(ctx, &intr->dest.ssa, 0, ret);
+   store_def(ctx, &intr->def, 0, ret);
   return true;
 }
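emit_read_first_invocation is the template for the wave ops that follow: pick the overloaded DXIL function from the def's bit size, build the argument list, call, and store the scalar result into component 0. Condensed from the hunks above:

   const struct dxil_func *func =
      dxil_get_function(&ctx->mod, "dx.op.waveReadLaneFirst",
                        get_overload(nir_type_uint, intr->def.bit_size));
   const struct dxil_value *args[] = {
      dxil_module_get_int32_const(&ctx->mod, DXIL_INTR_WAVE_READ_LANE_FIRST),
      get_src(ctx, intr->src, 0, nir_type_uint),
   };

   const struct dxil_value *ret =
      dxil_emit_call(&ctx->mod, func, args, ARRAY_SIZE(args));
   if (!ret)
      return false;
   store_def(ctx, &intr->def, 0, ret); /* scalar result, component 0 */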
@@ -4576,7 +4576,7 @@ emit_read_invocation(struct ntd_context *ctx, nir_intrinsic_instr *intr)
   ctx->mod.feats.wave_ops = 1;
   bool quad = intr->intrinsic == nir_intrinsic_quad_broadcast;
   const struct dxil_func *func = dxil_get_function(&ctx->mod, quad ? "dx.op.quadReadLaneAt" : "dx.op.waveReadLaneAt",
-                                                    get_overload(nir_type_uint, intr->dest.ssa.bit_size));
+                                                    get_overload(nir_type_uint, intr->def.bit_size));
   const struct dxil_value *args[] = {
      dxil_module_get_int32_const(&ctx->mod, quad ? DXIL_INTR_QUAD_READ_LANE_AT : DXIL_INTR_WAVE_READ_LANE_AT),
      get_src(ctx, &intr->src[0], 0, nir_type_uint),
@@ -4588,7 +4588,7 @@ emit_read_invocation(struct ntd_context *ctx, nir_intrinsic_instr *intr)
   const struct dxil_value *ret = dxil_emit_call(&ctx->mod, func, args, ARRAY_SIZE(args));
   if (!ret)
      return false;
-   store_def(ctx, &intr->dest.ssa, 0, ret);
+   store_def(ctx, &intr->def, 0, ret);
   return true;
 }
@@ -4609,7 +4609,7 @@ emit_vote_eq(struct ntd_context *ctx, nir_intrinsic_instr *intr)
   const struct dxil_value *ret = dxil_emit_call(&ctx->mod, func, args, ARRAY_SIZE(args));
   if (!ret)
      return false;
-   store_def(ctx, &intr->dest.ssa, 0, ret);
+   store_def(ctx, &intr->def, 0, ret);
   return true;
 }
@@ -4631,7 +4631,7 @@ emit_vote(struct ntd_context *ctx, nir_intrinsic_instr *intr)
   const struct dxil_value *ret = dxil_emit_call(&ctx->mod, func, args, ARRAY_SIZE(args));
   if (!ret)
      return false;
-   store_def(ctx, &intr->dest.ssa, 0, ret);
+   store_def(ctx, &intr->def, 0, ret);
   return true;
 }
@@ -4651,7 +4651,7 @@ emit_ballot(struct ntd_context *ctx, nir_intrinsic_instr *intr)
   if (!ret)
      return false;
   for (uint32_t i = 0; i < 4; ++i)
-      store_def(ctx, &intr->dest.ssa, i, dxil_emit_extractval(&ctx->mod, ret, i));
+      store_def(ctx, &intr->def, i, dxil_emit_extractval(&ctx->mod, ret, i));
   return true;
 }
@@ -4660,7 +4660,7 @@ emit_quad_op(struct ntd_context *ctx, nir_intrinsic_instr *intr, enum dxil_quad_
 {
   ctx->mod.feats.wave_ops = 1;
   const struct dxil_func *func = dxil_get_function(&ctx->mod, "dx.op.quadOp",
-                                                    get_overload(nir_type_uint, intr->dest.ssa.bit_size));
+                                                    get_overload(nir_type_uint, intr->def.bit_size));
   const struct dxil_value *args[] = {
      dxil_module_get_int32_const(&ctx->mod, DXIL_INTR_QUAD_OP),
      get_src(ctx, intr->src, 0, nir_type_uint),
@@ -4672,7 +4672,7 @@ emit_quad_op(struct ntd_context *ctx, nir_intrinsic_instr *intr, enum dxil_quad_
   const struct dxil_value *ret = dxil_emit_call(&ctx->mod, func, args, ARRAY_SIZE(args));
   if (!ret)
      return false;
-   store_def(ctx, &intr->dest.ssa, 0, ret);
+   store_def(ctx, &intr->def, 0, ret);
   return true;
 }
@@ -4693,7 +4693,7 @@ emit_reduce_bitwise(struct ntd_context *ctx, nir_intrinsic_instr *intr)
 {
   enum dxil_wave_bit_op_kind wave_bit_op = get_reduce_bit_op(nir_intrinsic_reduction_op(intr));
   const struct dxil_func *func = dxil_get_function(&ctx->mod, "dx.op.waveActiveBit",
-                                                    get_overload(nir_type_uint, intr->dest.ssa.bit_size));
+                                                    get_overload(nir_type_uint, intr->def.bit_size));
   const struct dxil_value *args[] = {
      dxil_module_get_int32_const(&ctx->mod, DXIL_INTR_WAVE_ACTIVE_BIT),
      get_src(ctx, intr->src, 0, nir_type_uint),
@@ -4705,7 +4705,7 @@ emit_reduce_bitwise(struct ntd_context *ctx, nir_intrinsic_instr *intr)
   const struct dxil_value *ret = dxil_emit_call(&ctx->mod, func, args, ARRAY_SIZE(args));
   if (!ret)
      return false;
-   store_def(ctx, &intr->dest.ssa, 0, ret);
+   store_def(ctx, &intr->def, 0, ret);
   return true;
 }
@@ -4750,7 +4750,7 @@ emit_reduce(struct ntd_context *ctx, nir_intrinsic_instr *intr)
   nir_alu_type alu_type = nir_op_infos[reduction_op].input_types[0];
   enum dxil_wave_op_kind wave_op = get_reduce_op(reduction_op);
   const struct dxil_func *func = dxil_get_function(&ctx->mod, is_prefix ? "dx.op.wavePrefixOp" : "dx.op.waveActiveOp",
-                                                    get_overload(alu_type, intr->dest.ssa.bit_size));
+                                                    get_overload(alu_type, intr->def.bit_size));
   bool is_unsigned = alu_type == nir_type_uint;
   const struct dxil_value *args[] = {
      dxil_module_get_int32_const(&ctx->mod, is_prefix ? DXIL_INTR_WAVE_PREFIX_OP : DXIL_INTR_WAVE_ACTIVE_OP),
@@ -4764,7 +4764,7 @@ emit_reduce(struct ntd_context *ctx, nir_intrinsic_instr *intr)
   const struct dxil_value *ret = dxil_emit_call(&ctx->mod, func, args, ARRAY_SIZE(args));
   if (!ret)
      return false;
-   store_def(ctx, &intr->dest.ssa, 0, ret);
+   store_def(ctx, &intr->def, 0, ret);
   return true;
 }
@@ -4979,13 +4979,13 @@ emit_deref(struct ntd_context* ctx, nir_deref_instr* instr)
   /* Just store the values, we'll use these to build a GEP in the load or store */
   switch (instr->deref_type) {
   case nir_deref_type_var:
-      store_def(ctx, &instr->dest.ssa, 0, dxil_module_get_int_const(&ctx->mod, 0, instr->dest.ssa.bit_size));
+      store_def(ctx, &instr->def, 0, dxil_module_get_int_const(&ctx->mod, 0, instr->def.bit_size));
      return true;
   case nir_deref_type_array:
-      store_def(ctx, &instr->dest.ssa, 0, get_src(ctx, &instr->arr.index, 0, nir_type_int));
+      store_def(ctx, &instr->def, 0, get_src(ctx, &instr->arr.index, 0, nir_type_int));
      return true;
   case nir_deref_type_struct:
-      store_def(ctx, &instr->dest.ssa, 0, dxil_module_get_int_const(&ctx->mod, instr->strct.index, 32));
+      store_def(ctx, &instr->def, 0, dxil_module_get_int_const(&ctx->mod, instr->strct.index, 32));
      return true;
   default:
      unreachable("Other deref types not supported");
@@ -5025,7 +5025,7 @@ emit_deref(struct ntd_context* ctx, nir_deref_instr* instr)
   /* Haven't finished chasing the deref chain yet, just store the value */
   if (glsl_type_is_array(type)) {
-      store_def(ctx, &instr->dest.ssa, 0, binding);
+      store_def(ctx, &instr->def, 0, binding);
      return true;
   }
@@ -5045,7 +5045,7 @@ emit_deref(struct ntd_context* ctx, nir_deref_instr* instr)
   if (!handle)
      return false;

-   store_ssa_def(ctx, &instr->dest.ssa, 0, handle);
+   store_ssa_def(ctx, &instr->def, 0, handle);
   return true;
 }
@@ -5097,13 +5097,13 @@ emit_phi(struct ntd_context *ctx, nir_phi_instr *instr)
   }

   struct phi_block *vphi = ralloc(ctx->phis, struct phi_block);
-   vphi->num_components = instr->dest.ssa.num_components;
+   vphi->num_components = instr->def.num_components;

   for (unsigned i = 0; i < vphi->num_components; ++i) {
      struct dxil_instr *phi = vphi->comp[i] = dxil_emit_phi(&ctx->mod, type);
      if (!phi)
         return false;
-      store_ssa_def(ctx, &instr->dest.ssa, i, dxil_instr_get_return_value(phi));
+      store_ssa_def(ctx, &instr->def, i, dxil_instr_get_return_value(phi));
   }
   _mesa_hash_table_insert(ctx->phis, instr, vphi);
   return true;
@@ -5589,16 +5589,16 @@ emit_tex(struct ntd_context *ctx, nir_tex_instr *instr)

   case nir_texop_lod:
      sample = emit_texture_lod(ctx, &params, true);
-      store_def(ctx, &instr->dest.ssa, 0, sample);
+      store_def(ctx, &instr->def, 0, sample);
      sample = emit_texture_lod(ctx, &params, false);
-      store_def(ctx, &instr->dest.ssa, 1, sample);
+      store_def(ctx, &instr->def, 1, sample);
      return true;

   case nir_texop_query_levels: {
      params.lod_or_sample = dxil_module_get_int_const(&ctx->mod, 0, 32);
      sample = emit_texture_size(ctx, &params);
      const struct dxil_value *retval = dxil_emit_extractval(&ctx->mod, sample, 3);
-      store_def(ctx, &instr->dest.ssa, 0, retval);
+      store_def(ctx, &instr->def, 0, retval);
      return true;
   }
@@ -5606,7 +5606,7 @@ emit_tex(struct ntd_context *ctx, nir_tex_instr *instr)
      params.lod_or_sample = int_undef;
      sample = emit_texture_size(ctx, &params);
      const struct dxil_value *retval = dxil_emit_extractval(&ctx->mod, sample, 3);
-      store_def(ctx, &instr->dest.ssa, 0, retval);
+      store_def(ctx, &instr->def, 0, retval);
      return true;
   }
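emit_phi splits a NIR phi into one scalar DXIL phi per component and registers each as that component's value; the incoming operands are wired up later, once every predecessor block exists. The core of it, condensed from the hunk above:

   struct phi_block *vphi = ralloc(ctx->phis, struct phi_block);
   vphi->num_components = instr->def.num_components;

   for (unsigned i = 0; i < vphi->num_components; ++i) {
      struct dxil_instr *phi = vphi->comp[i] = dxil_emit_phi(&ctx->mod, type);
      /* Each scalar phi becomes component i of the NIR def's value. */
      store_ssa_def(ctx, &instr->def, i, dxil_instr_get_return_value(phi));
   }
   _mesa_hash_table_insert(ctx->phis, instr, vphi);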
@@ -5618,9 +5618,9 @@ emit_tex(struct ntd_context *ctx, nir_tex_instr *instr)
   if (!sample)
      return false;

-   for (unsigned i = 0; i < instr->dest.ssa.num_components; ++i) {
+   for (unsigned i = 0; i < instr->def.num_components; ++i) {
      const struct dxil_value *retval = dxil_emit_extractval(&ctx->mod, sample, i);
-      store_def(ctx, &instr->dest.ssa, i, retval);
+      store_def(ctx, &instr->def, i, retval);
   }

   return true;
diff --git a/src/microsoft/spirv_to_dxil/dxil_spirv_nir.c b/src/microsoft/spirv_to_dxil/dxil_spirv_nir.c
index 0bf0ec9..d3658bb 100644
--- a/src/microsoft/spirv_to_dxil/dxil_spirv_nir.c
+++ b/src/microsoft/spirv_to_dxil/dxil_spirv_nir.c
@@ -249,16 +249,16 @@ lower_shader_system_values(struct nir_builder *builder, nir_instr *instr,
   nir_def *load_data = nir_load_ubo(
      builder,
-      intrin->dest.ssa.num_components,
-      intrin->dest.ssa.bit_size,
+      intrin->def.num_components,
+      intrin->def.bit_size,
      nir_channel(builder, load_desc, 0),
      nir_imm_int(builder, offset),
      .align_mul = 256,
      .align_offset = offset,
      .range_base = offset,
-      .range = intrin->dest.ssa.bit_size * intrin->dest.ssa.num_components / 8);
+      .range = intrin->def.bit_size * intrin->def.num_components / 8);

-   nir_def_rewrite_uses(&intrin->dest.ssa, load_data);
+   nir_def_rewrite_uses(&intrin->def, load_data);
   nir_instr_remove(instr);
   return true;
 }
@@ -338,8 +338,8 @@ lower_load_push_constant(struct nir_builder *builder, nir_instr *instr,
   nir_def *offset = nir_ssa_for_src(builder, intrin->src[0], 1);
   nir_def *load_data = nir_load_ubo(
      builder,
-      intrin->dest.ssa.num_components,
-      intrin->dest.ssa.bit_size,
+      intrin->def.num_components,
+      intrin->def.bit_size,
      nir_channel(builder, load_desc, 0),
      nir_iadd_imm(builder, offset, base),
      .align_mul = nir_intrinsic_align_mul(intrin),
@@ -347,7 +347,7 @@ lower_load_push_constant(struct nir_builder *builder, nir_instr *instr,
      .range_base = base,
      .range = range);

-   nir_def_rewrite_uses(&intrin->dest.ssa, load_data);
+   nir_def_rewrite_uses(&intrin->def, load_data);
   nir_instr_remove(instr);
   return true;
 }
@@ -517,7 +517,7 @@ discard_psiz_access(struct nir_builder *builder, nir_instr *instr,
   builder->cursor = nir_before_instr(instr);

   if (intrin->intrinsic == nir_intrinsic_load_deref)
-      nir_def_rewrite_uses(&intrin->dest.ssa, nir_imm_float(builder, 1.0));
+      nir_def_rewrite_uses(&intrin->def, nir_imm_float(builder, 1.0));

   nir_instr_remove(instr);
   return true;
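lower_load_push_constant above turns a push-constant load into a plain UBO load whose shape is copied from the original def. The essential call, with load_desc being the descriptor vector fetched earlier in the pass:

   nir_def *load_data = nir_load_ubo(
      b, intrin->def.num_components, intrin->def.bit_size,
      nir_channel(b, load_desc, 0),  /* buffer index */
      nir_iadd_imm(b, offset, base), /* byte offset */
      .align_mul = nir_intrinsic_align_mul(intrin),
      .align_offset = nir_intrinsic_align_offset(intrin),
      .range_base = base,
      .range = range);

   nir_def_rewrite_uses(&intrin->def, load_data);
   nir_instr_remove(instr);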
@@ -595,9 +595,9 @@ kill_undefined_varyings(struct nir_builder *b,
       * since that would remove the store instruction, and would make it tricky to satisfy
       * the DXIL requirements of writing all position components.
       */
-      nir_def *zero = nir_imm_zero(b, intr->dest.ssa.num_components,
-                                   intr->dest.ssa.bit_size);
-      nir_def_rewrite_uses(&intr->dest.ssa, zero);
+      nir_def *zero = nir_imm_zero(b, intr->def.num_components,
+                                   intr->def.bit_size);
+      nir_def_rewrite_uses(&intr->def, zero);
      nir_instr_remove(instr);
      return true;
   }
@@ -785,7 +785,7 @@ lower_pntc_read(nir_builder *b, nir_instr *instr, void *data)
   if (!var || var->data.location != VARYING_SLOT_PNTC)
      return false;

-   nir_def *point_center = &intr->dest.ssa;
+   nir_def *point_center = &intr->def;
   nir_variable *pos_var = (nir_variable *)data;

   b->cursor = nir_after_instr(instr);
@@ -795,11 +795,11 @@ lower_pntc_read(nir_builder *b, nir_instr *instr, void *data)
      pos = nir_load_var(b, pos_var);
   else if (var->data.sample)
      pos = nir_interp_deref_at_sample(b, 4, 32,
-                                       &nir_build_deref_var(b, pos_var)->dest.ssa,
+                                       &nir_build_deref_var(b, pos_var)->def,
                                       nir_load_sample_id(b));
   else
      pos = nir_interp_deref_at_offset(b, 4, 32,
-                                       &nir_build_deref_var(b, pos_var)->dest.ssa,
+                                       &nir_build_deref_var(b, pos_var)->def,
                                       nir_imm_zero(b, 2, 32));

   nir_def *pntc = nir_fadd_imm(b,
@@ -942,7 +942,7 @@ lower_bit_size_callback(const nir_instr *instr, void *data)
   case nir_intrinsic_reduce:
   case nir_intrinsic_inclusive_scan:
   case nir_intrinsic_exclusive_scan:
-      return intr->dest.ssa.bit_size == 1 ? 32 : 0;
+      return intr->def.bit_size == 1 ? 32 : 0;
   default:
      return 0;
   }
diff --git a/src/microsoft/spirv_to_dxil/dxil_spirv_nir_lower_bindless.c b/src/microsoft/spirv_to_dxil/dxil_spirv_nir_lower_bindless.c
index cb0d411..eea5aae 100644
--- a/src/microsoft/spirv_to_dxil/dxil_spirv_nir_lower_bindless.c
+++ b/src/microsoft/spirv_to_dxil/dxil_spirv_nir_lower_bindless.c
@@ -110,7 +110,7 @@ lower_vulkan_resource_index(nir_builder *b, nir_intrinsic_instr *intr,
   nir_def *res_idx =
      load_vulkan_ssbo(b, remap.descriptor_set,
                       nir_imul_imm(b, index_in_ubo, descriptor_size), 2);

-   nir_def_rewrite_uses(&intr->dest.ssa, res_idx);
+   nir_def_rewrite_uses(&intr->def, res_idx);
   return true;
 }
diff --git a/src/microsoft/vulkan/dzn_nir.c b/src/microsoft/vulkan/dzn_nir.c
index 185a0c5..667ec31 100644
--- a/src/microsoft/vulkan/dzn_nir.c
+++ b/src/microsoft/vulkan/dzn_nir.c
@@ -715,12 +715,12 @@ dzn_nir_blit_fs(const struct dzn_nir_blit_info *info)
                         nir_imm_int(&b, 0));
         tex->src[3] =
            nir_tex_src_for_ssa(nir_tex_src_texture_deref,
-                                &tex_deref->dest.ssa);
+                                &tex_deref->def);

-         nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
+         nir_def_init(&tex->instr, &tex->def, 4, 32);
         nir_builder_instr_insert(&b, &tex->instr);

-         res = res ? nir_build_alu2(&b, resolve_op, res, &tex->dest.ssa) : &tex->dest.ssa;
+         res = res ? nir_build_alu2(&b, resolve_op, res, &tex->def) : &tex->def;
      }

      if (resolve_mode == dzn_blit_resolve_average)
@@ -747,7 +747,7 @@ dzn_nir_blit_fs(const struct dzn_nir_blit_info *info)
                         nir_imm_int(&b, 0));
      tex->src[3] =
         nir_tex_src_for_ssa(nir_tex_src_texture_deref,
-                             &tex_deref->dest.ssa);
+                             &tex_deref->def);
   } else {
      nir_variable *sampler_var =
         nir_variable_create(b.shader, nir_var_uniform, glsl_bare_sampler_type(), "sampler");
@@ -760,15 +760,15 @@ dzn_nir_blit_fs(const struct dzn_nir_blit_info *info)
      tex->coord_components = coord_comps;

      tex->src[1] =
         nir_tex_src_for_ssa(nir_tex_src_texture_deref,
-                             &tex_deref->dest.ssa);
+                             &tex_deref->def);
      tex->src[2] =
         nir_tex_src_for_ssa(nir_tex_src_sampler_deref,
-                             &sampler_deref->dest.ssa);
+                             &sampler_deref->def);
   }

-   nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
+   nir_def_init(&tex->instr, &tex->def, 4, 32);
   nir_builder_instr_insert(&b, &tex->instr);
-   res = &tex->dest.ssa;
+   res = &tex->def;
 }

 nir_store_var(&b, out, nir_trim_vector(&b, res, out_comps), 0xf);
diff --git a/src/nouveau/codegen/nv50_ir_from_nir.cpp b/src/nouveau/codegen/nv50_ir_from_nir.cpp
index 0c5ef4c..b28f008 100644
--- a/src/nouveau/codegen/nv50_ir_from_nir.cpp
+++ b/src/nouveau/codegen/nv50_ir_from_nir.cpp
@@ -278,7 +278,7 @@ Converter::getDType(nir_intrinsic_instr *insn)
      break;
   }

-   return typeOfSize(insn->dest.ssa.bit_size / 8, isFloat, isSigned);
+   return typeOfSize(insn->def.bit_size / 8, isFloat, isSigned);
 }

 DataType
@@ -1615,7 +1615,7 @@ Converter::visit(nir_intrinsic_instr *insn)
   switch (op) {
   case nir_intrinsic_decl_reg: {
-      const unsigned reg_index = insn->dest.ssa.index;
+      const unsigned reg_index = insn->def.index;
      const unsigned bit_size = nir_intrinsic_bit_size(insn);
      const unsigned num_components = nir_intrinsic_num_components(insn);
      assert(nir_intrinsic_num_array_elems(insn) == 0);
@@ -1636,7 +1636,7 @@ Converter::visit(nir_intrinsic_instr *insn)
      LValues &src = it->second;

      DataType dType = getDType(insn);
-      LValues &newDefs = convert(&insn->dest.ssa);
+      LValues &newDefs = convert(&insn->def);
      for (uint8_t c = 0; c < insn->num_components; c++)
         mkMov(newDefs[c], src[c], dType);
      break;
@@ -1703,7 +1703,7 @@ Converter::visit(nir_intrinsic_instr *insn)
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_load_output: {
-      LValues &newDefs = convert(&insn->dest.ssa);
+      LValues &newDefs = convert(&insn->def);

      // FBFetch
      if (prog->getType() == Program::TYPE_FRAGMENT &&
@@ -1814,7 +1814,7 @@ Converter::visit(nir_intrinsic_instr *insn)
   case nir_intrinsic_load_barycentric_centroid:
   case nir_intrinsic_load_barycentric_pixel:
   case nir_intrinsic_load_barycentric_sample: {
-      LValues &newDefs = convert(&insn->dest.ssa);
+      LValues &newDefs = convert(&insn->def);
      uint32_t mode;

      if (op == nir_intrinsic_load_barycentric_centroid ||
@@ -1895,7 +1895,7 @@ Converter::visit(nir_intrinsic_instr *insn)
   case nir_intrinsic_load_work_dim: {
      const DataType dType = getDType(insn);
      SVSemantic sv = convert(op);
-      LValues &newDefs = convert(&insn->dest.ssa);
+      LValues &newDefs = convert(&insn->def);

      for (uint8_t i = 0u; i < nir_intrinsic_dest_components(insn); ++i) {
         Value *def;
@@ -1920,14 +1920,14 @@ Converter::visit(nir_intrinsic_instr *insn)
   }
   // constants
   case nir_intrinsic_load_subgroup_size: {
-      LValues &newDefs = convert(&insn->dest.ssa);
+      LValues &newDefs = convert(&insn->def);
      loadImm(newDefs[0], 32u);
      break;
   }
   case nir_intrinsic_vote_all:
   case nir_intrinsic_vote_any:
   case nir_intrinsic_vote_ieq: {
-      LValues &newDefs = convert(&insn->dest.ssa);
+      LValues &newDefs = convert(&insn->def);
      Value *pred = getScratch(1, FILE_PREDICATE);

      mkCmp(OP_SET, CC_NE, TYPE_U32, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
      mkOp1(OP_VOTE, TYPE_U32, pred, pred)->subOp = getSubOp(op);
@@ -1935,7 +1935,7 @@ Converter::visit(nir_intrinsic_instr *insn)
      break;
   }
   case nir_intrinsic_ballot: {
-      LValues &newDefs = convert(&insn->dest.ssa);
+      LValues &newDefs = convert(&insn->def);
      Value *pred = getSSA(1, FILE_PREDICATE);

      mkCmp(OP_SET, CC_NE, TYPE_U32, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
      mkOp1(OP_VOTE, TYPE_U32, newDefs[0], pred)->subOp = NV50_IR_SUBOP_VOTE_ANY;
@@ -1943,7 +1943,7 @@ Converter::visit(nir_intrinsic_instr *insn)
   }
   case nir_intrinsic_read_first_invocation:
   case nir_intrinsic_read_invocation: {
-      LValues &newDefs = convert(&insn->dest.ssa);
+      LValues &newDefs = convert(&insn->def);
      const DataType dType = getDType(insn);
      Value *tmp = getScratch();
@@ -1962,7 +1962,7 @@ Converter::visit(nir_intrinsic_instr *insn)
   }
   case nir_intrinsic_load_per_vertex_input: {
      const DataType dType = getDType(insn);
-      LValues &newDefs = convert(&insn->dest.ssa);
+      LValues &newDefs = convert(&insn->def);
      Value *indirectVertex;
      Value *indirectOffset;
      uint32_t baseVertex = getIndirect(&insn->src[0], 0, indirectVertex);
@@ -1979,7 +1979,7 @@ Converter::visit(nir_intrinsic_instr *insn)
   }
   case nir_intrinsic_load_per_vertex_output: {
      const DataType dType = getDType(insn);
-      LValues &newDefs = convert(&insn->dest.ssa);
+      LValues &newDefs = convert(&insn->def);
      Value *indirectVertex;
      Value *indirectOffset;
      uint32_t baseVertex = getIndirect(&insn->src[0], 0, indirectVertex);
@@ -2016,7 +2016,7 @@ Converter::visit(nir_intrinsic_instr *insn)
   }
   case nir_intrinsic_load_ubo: {
      const DataType dType = getDType(insn);
-      LValues &newDefs = convert(&insn->dest.ssa);
+      LValues &newDefs = convert(&insn->def);
      Value *indirectIndex;
      Value *indirectOffset;
      uint32_t index = getIndirect(&insn->src[0], 0, indirectIndex);
@@ -2031,7 +2031,7 @@ Converter::visit(nir_intrinsic_instr *insn)
      break;
   }
   case nir_intrinsic_get_ssbo_size: {
-      LValues &newDefs = convert(&insn->dest.ssa);
+      LValues &newDefs = convert(&insn->def);
      const DataType dType = getDType(insn);
      Value *indirectBuffer;
      uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
@@ -2063,7 +2063,7 @@ Converter::visit(nir_intrinsic_instr *insn)
   }
   case nir_intrinsic_load_ssbo: {
      const DataType dType = getDType(insn);
-      LValues &newDefs = convert(&insn->dest.ssa);
+      LValues &newDefs = convert(&insn->def);
      Value *indirectBuffer;
      Value *indirectOffset;
      uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
@@ -2081,7 +2081,7 @@ Converter::visit(nir_intrinsic_instr *insn)
   case nir_intrinsic_shared_atomic:
   case nir_intrinsic_shared_atomic_swap: {
      const DataType dType = getDType(insn);
-      LValues &newDefs = convert(&insn->dest.ssa);
+      LValues &newDefs = convert(&insn->def);
      Value *indirectOffset;
      uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
      Symbol *sym = mkSymbol(FILE_MEMORY_SHARED, 0, dType, offset);
@@ -2095,7 +2095,7 @@ Converter::visit(nir_intrinsic_instr *insn)
   case nir_intrinsic_ssbo_atomic:
   case nir_intrinsic_ssbo_atomic_swap: {
      const DataType dType = getDType(insn);
-      LValues &newDefs = convert(&insn->dest.ssa);
+      LValues &newDefs = convert(&insn->def);
      Value *indirectBuffer;
      Value *indirectOffset;
      uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
@@ -2116,7 +2116,7 @@ Converter::visit(nir_intrinsic_instr *insn)
   case nir_intrinsic_global_atomic:
   case nir_intrinsic_global_atomic_swap: {
      const DataType dType = getDType(insn);
-      LValues &newDefs = convert(&insn->dest.ssa);
+      LValues &newDefs = convert(&insn->def);
      Value *address;
      uint32_t offset = getIndirect(&insn->src[0], 0, address);
@@ -2155,7 +2155,7 @@ Converter::visit(nir_intrinsic_instr *insn)
      uint16_t location = 0;

      if (opInfo.has_dest) {
-         LValues &newDefs = convert(&insn->dest.ssa);
+         LValues &newDefs = convert(&insn->def);
         for (uint8_t i = 0u; i < newDefs.size(); ++i) {
            defs.push_back(newDefs[i]);
            mask |= 1 << i;
@@ -2281,7 +2281,7 @@ Converter::visit(nir_intrinsic_instr *insn)
   case nir_intrinsic_load_scratch:
   case nir_intrinsic_load_shared: {
      const DataType dType = getDType(insn);
-      LValues &newDefs = convert(&insn->dest.ssa);
+      LValues &newDefs = convert(&insn->def);
      Value *indirectOffset;
      uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
      if (indirectOffset)
@@ -2322,7 +2322,7 @@ Converter::visit(nir_intrinsic_instr *insn)
   }
   case nir_intrinsic_shader_clock: {
      const DataType dType = getDType(insn);
-      LValues &newDefs = convert(&insn->dest.ssa);
+      LValues &newDefs = convert(&insn->def);

      loadImm(newDefs[0], 0u);
      mkOp1(OP_RDSV, dType, newDefs[1], mkSysVal(SV_CLOCK, 0))->fixed = 1;
@@ -2331,7 +2331,7 @@ Converter::visit(nir_intrinsic_instr *insn)
   case nir_intrinsic_load_global:
   case nir_intrinsic_load_global_constant: {
      const DataType dType = getDType(insn);
-      LValues &newDefs = convert(&insn->dest.ssa);
+      LValues &newDefs = convert(&insn->def);
      Value *indirectOffset;
      uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
@@ -2960,7 +2960,7 @@ Converter::visit(nir_tex_instr *insn)
   case nir_texop_txf_ms:
   case nir_texop_txl:
   case nir_texop_txs: {
-      LValues &newDefs = convert(&insn->dest.ssa);
+      LValues &newDefs = convert(&insn->def);
      std::vector srcs;
      std::vector defs;
      std::vector offsets;
diff --git a/src/nouveau/vulkan/nvk_nir_lower_descriptors.c b/src/nouveau/vulkan/nvk_nir_lower_descriptors.c
index cb5eb4a..ad45545 100644
--- a/src/nouveau/vulkan/nvk_nir_lower_descriptors.c
+++ b/src/nouveau/vulkan/nvk_nir_lower_descriptors.c
@@ -152,7 +152,7 @@ try_lower_load_vulkan_descriptor(nir_builder *b, nir_intrinsic_instr *intrin,
   nir_def *desc = load_descriptor_for_idx_intrin(b, idx_intrin, ctx);

-   nir_def_rewrite_uses(&intrin->dest.ssa, desc);
+   nir_def_rewrite_uses(&intrin->def, desc);

   return true;
 }
@@ -173,7 +173,7 @@ lower_num_workgroups(nir_builder *b, nir_intrinsic_instr *load,
                    .align_offset = 0,
                    .range = root_table_offset + 3 * 4);

-   nir_def_rewrite_uses(&load->dest.ssa, val);
+   nir_def_rewrite_uses(&load->def, val);

   return true;
 }
@@ -194,7 +194,7 @@ lower_load_base_workgroup_id(nir_builder *b, nir_intrinsic_instr *load,
                    .align_offset = 0,
                    .range = root_table_offset + 3 * 4);

-   nir_def_rewrite_uses(&load->dest.ssa, val);
+   nir_def_rewrite_uses(&load->def, val);

   return true;
 }
@@ -213,14 +213,14 @@ lower_load_push_constant(nir_builder *b, nir_intrinsic_instr *load,
                 push_region_offset + base);

   nir_def *val =
-      nir_load_ubo(b, load->dest.ssa.num_components, load->dest.ssa.bit_size,
+      nir_load_ubo(b, load->def.num_components, load->def.bit_size,
                  nir_imm_int(b, 0), offset,
-                  .align_mul = load->dest.ssa.bit_size / 8,
+                  .align_mul = load->def.bit_size / 8,
                  .align_offset = 0,
                  .range = push_region_offset + base +
                           nir_intrinsic_range(load));

-   nir_def_rewrite_uses(&load->dest.ssa, val);
+   nir_def_rewrite_uses(&load->def, val);

   return true;
 }
@@ -241,7 +241,7 @@ lower_load_view_index(nir_builder *b, nir_intrinsic_instr *load,
                    .align_offset = 0,
                    .range = root_table_offset + 4);

-   nir_def_rewrite_uses(&load->dest.ssa, val);
+   nir_def_rewrite_uses(&load->def, val);

   return true;
 }
@@ -503,7 +503,7 @@ lower_ssbo_resource_index(nir_builder *b, nir_intrinsic_instr *intrin,
      unreachable("Unknown address mode");
   }

-   nir_def_rewrite_uses(&intrin->dest.ssa, addr);
+   nir_def_rewrite_uses(&intrin->def, addr);

   return true;
 }
@@ -542,7 +542,7 @@ lower_ssbo_resource_reindex(nir_builder *b, nir_intrinsic_instr *intrin,
   addr = nir_build_addr_iadd(b, addr, ctx->ssbo_addr_format,
                              nir_var_mem_ssbo, offset);

-   nir_def_rewrite_uses(&intrin->dest.ssa, addr);
+   nir_def_rewrite_uses(&intrin->def, addr);

   return true;
 }
@@ -597,7 +597,7 @@ lower_load_ssbo_descriptor(nir_builder *b, nir_intrinsic_instr *intrin,
      unreachable("Unknown address mode");
   }

-   nir_def_rewrite_uses(&intrin->dest.ssa, desc);
+   nir_def_rewrite_uses(&intrin->def, desc);

   return true;
 }
diff --git a/src/nouveau/vulkan/nvk_shader.c b/src/nouveau/vulkan/nvk_shader.c
index 1b4cb42..1c2928e 100644
--- a/src/nouveau/vulkan/nvk_shader.c
+++ b/src/nouveau/vulkan/nvk_shader.c
@@ -132,7 +132,7 @@ lower_image_size_to_txs(nir_builder *b, nir_instr *instr, UNUSED void *_data)
      }
   }

-   nir_def_rewrite_uses(&intrin->dest.ssa, size);
+   nir_def_rewrite_uses(&intrin->def, size);

   return true;
 }
@@ -158,7 +158,7 @@ lower_load_global_constant_offset_instr(nir_builder *b, nir_instr *instr,
   if (intrin->intrinsic == nir_intrinsic_load_global_constant_bounded) {
      nir_def *bound = intrin->src[2].ssa;

-      unsigned bit_size = intrin->dest.ssa.bit_size;
+      unsigned bit_size = intrin->def.bit_size;
      assert(bit_size >= 8 && bit_size % 8 == 0);
      unsigned byte_size = bit_size / 8;
@@ -175,8 +175,8 @@ lower_load_global_constant_offset_instr(nir_builder *b, nir_instr *instr,
   }

   nir_def *val =
-      nir_build_load_global(b, intrin->dest.ssa.num_components,
-                            intrin->dest.ssa.bit_size,
+      nir_build_load_global(b, intrin->def.num_components,
+                            intrin->def.bit_size,
                            nir_iadd(b, base_addr, nir_u2u64(b, offset)),
                            .access = nir_intrinsic_access(intrin),
                            .align_mul = nir_intrinsic_align_mul(intrin),
@@ -187,7 +187,7 @@ lower_load_global_constant_offset_instr(nir_builder *b, nir_instr *instr,
      val = nir_if_phi(b, val, zero);
   }

-   nir_def_rewrite_uses(&intrin->dest.ssa, val);
+   nir_def_rewrite_uses(&intrin->def, val);

   return true;
 }
@@ -251,7 +251,7 @@ lower_fragcoord_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data)
      return false;
   }

-   nir_def_rewrite_uses(&intrin->dest.ssa, val);
+   nir_def_rewrite_uses(&intrin->def, val);

   return true;
 }
@@ -271,7 +271,7 @@ lower_system_value_first_vertex(nir_builder *b, nir_instr *instr, UNUSED void *_
   b->cursor = nir_before_instr(&intrin->instr);
   nir_def *base_vertex = nir_load_base_vertex(b);

-   nir_def_rewrite_uses(&intrin->dest.ssa, base_vertex);
+   nir_def_rewrite_uses(&intrin->def, base_vertex);

   return true;
 }
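lower_load_global_constant_offset_instr guards the bounded variant with if/phi control flow so an out-of-bounds read yields zero. A sketch of that skeleton (the comparison helper chosen here is an assumption; the real pass may test bounds differently):

   nir_def *bound = intrin->src[2].ssa;
   nir_def *end = nir_iadd_imm(b, offset, byte_size);

   nir_push_if(b, nir_uge(b, bound, end)); /* bound >= offset + size */
   nir_def *val =
      nir_build_load_global(b, intrin->def.num_components, intrin->def.bit_size,
                            nir_iadd(b, base_addr, nir_u2u64(b, offset)));
   nir_push_else(b, NULL);
   nir_def *zero =
      nir_imm_zero(b, intrin->def.num_components, intrin->def.bit_size);
   nir_pop_if(b, NULL);
   val = nir_if_phi(b, val, zero);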
diff --git a/src/panfrost/compiler/bi_lower_divergent_indirects.c b/src/panfrost/compiler/bi_lower_divergent_indirects.c
index 1f4b2b0..070645d 100644
--- a/src/panfrost/compiler/bi_lower_divergent_indirects.c
+++ b/src/panfrost/compiler/bi_lower_divergent_indirects.c
@@ -84,11 +84,11 @@ bi_lower_divergent_indirects_impl(nir_builder *b, nir_instr *instr, void *data)
   /* Write zero in a funny way to bypass lower_load_const_to_scalar */
   bool has_dest = nir_intrinsic_infos[intr->intrinsic].has_dest;
-   unsigned size = has_dest ? intr->dest.ssa.bit_size : 32;
+   unsigned size = has_dest ? intr->def.bit_size : 32;
   nir_def *zero = has_dest ? nir_imm_zero(b, 1, size) : NULL;
   nir_def *zeroes[4] = {zero, zero, zero, zero};
   nir_def *res =
-      has_dest ? nir_vec(b, zeroes, intr->dest.ssa.num_components) : NULL;
+      has_dest ? nir_vec(b, zeroes, intr->def.num_components) : NULL;

   for (unsigned i = 0; i < (*lanes); ++i) {
      nir_push_if(b, nir_ieq_imm(b, lane, i));
@@ -99,13 +99,13 @@ bi_lower_divergent_indirects_impl(nir_builder *b, nir_instr *instr, void *data)
      nir_pop_if(b, NULL);

      if (has_dest) {
-         nir_def *c_ssa = &c_intr->dest.ssa;
+         nir_def *c_ssa = &c_intr->def;
         res = nir_if_phi(b, c_ssa, res);
      }
   }

   if (has_dest)
-      nir_def_rewrite_uses(&intr->dest.ssa, res);
+      nir_def_rewrite_uses(&intr->def, res);

   nir_instr_remove(instr);
   return true;
diff --git a/src/panfrost/compiler/bifrost_compile.c b/src/panfrost/compiler/bifrost_compile.c
index 7ab2ef3..1c8b548 100644
--- a/src/panfrost/compiler/bifrost_compile.c
+++ b/src/panfrost/compiler/bifrost_compile.c
@@ -392,7 +392,7 @@ bi_copy_component(bi_builder *b, nir_intrinsic_instr *instr, bi_index tmp)
   unsigned component = nir_intrinsic_component(instr);
   unsigned nr = instr->num_components;
   unsigned total = nr + component;
-   unsigned bitsize = instr->dest.ssa.bit_size;
+   unsigned bitsize = instr->def.bit_size;

   assert(total <= 4 && "should be vec4");
   bi_emit_cached_split(b, tmp, total * bitsize);
@@ -403,8 +403,8 @@ bi_copy_component(bi_builder *b, nir_intrinsic_instr *instr, bi_index tmp)
   bi_index srcs[] = {tmp, tmp, tmp};
   unsigned channels[] = {component, component + 1, component + 2};

-   bi_make_vec_to(b, bi_def_index(&instr->dest.ssa), srcs, channels, nr,
-                  instr->dest.ssa.bit_size);
+   bi_make_vec_to(b, bi_def_index(&instr->def), srcs, channels, nr,
+                  instr->def.bit_size);
 }

 static void
@@ -429,7 +429,7 @@ bi_emit_load_attr(bi_builder *b, nir_intrinsic_instr *instr)
   bool constant = nir_src_is_const(*offset);
   bool immediate = bi_is_intr_immediate(instr, &imm_index, 16);
   bi_index dest =
-      (component == 0) ? bi_def_index(&instr->dest.ssa) : bi_temp(b->shader);
+      (component == 0) ? bi_def_index(&instr->def) : bi_temp(b->shader);
   bi_instr *I;

   if (immediate) {
@@ -500,9 +500,9 @@ bi_emit_load_vary(bi_builder *b, nir_intrinsic_instr *instr)
   unsigned component = nir_intrinsic_component(instr);
   enum bi_vecsize vecsize = (instr->num_components + component - 1);
   bi_index dest =
-      (component == 0) ? bi_def_index(&instr->dest.ssa) : bi_temp(b->shader);
+      (component == 0) ? bi_def_index(&instr->def) : bi_temp(b->shader);

-   unsigned sz = instr->dest.ssa.bit_size;
+   unsigned sz = instr->def.bit_size;

   if (smooth) {
      nir_intrinsic_instr *parent = nir_src_as_intrinsic(instr->src[0]);
@@ -734,8 +734,7 @@ bi_emit_load_blend_input(bi_builder *b, nir_intrinsic_instr *instr)
   bi_index srcs[] = {bi_preload(b, base + 0), bi_preload(b, base + 1),
                      bi_preload(b, base + 2), bi_preload(b, base + 3)};

-   bi_emit_collect_to(b, bi_def_index(&instr->dest.ssa), srcs,
-                      size == 32 ? 4 : 2);
+   bi_emit_collect_to(b, bi_def_index(&instr->def), srcs, size == 32 ? 4 : 2);
 }

 static void
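bi_lower_divergent_indirects serializes a divergent indirect access by testing each lane in turn and merging results through phis; the renamed fields don't change that shape. Condensed, with clone_access standing in for the pass's re-emission of the access inside the branch (a hypothetical helper, not this file's API):

   nir_def *lane = nir_load_subgroup_invocation(b);
   nir_def *res = has_dest
      ? nir_imm_zero(b, intr->def.num_components, intr->def.bit_size)
      : NULL;

   for (unsigned i = 0; i < lanes; ++i) {
      nir_push_if(b, nir_ieq_imm(b, lane, i));
      nir_intrinsic_instr *c_intr = clone_access(b, intr); /* hypothetical */
      nir_pop_if(b, NULL);

      if (has_dest)
         res = nir_if_phi(b, &c_intr->def, res);
   }

   if (has_dest)
      nir_def_rewrite_uses(&intr->def, res);
   nir_instr_remove(instr);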
@@ -1050,8 +1049,8 @@ bi_emit_load_ubo(bi_builder *b, nir_intrinsic_instr *instr)
   bi_index dyn_offset = bi_src_index(offset);
   uint32_t const_offset = offset_is_const ? nir_src_as_uint(*offset) : 0;

-   bi_load_ubo_to(b, instr->num_components * instr->dest.ssa.bit_size,
-                  bi_def_index(&instr->dest.ssa),
+   bi_load_ubo_to(b, instr->num_components * instr->def.bit_size,
+                  bi_def_index(&instr->def),
                  offset_is_const ? bi_imm_u32(const_offset) : dyn_offset,
                  bi_src_index(&instr->src[0]));
 }
@@ -1066,7 +1065,7 @@ bi_emit_load_push_constant(bi_builder *b, nir_intrinsic_instr *instr)
   uint32_t base = nir_intrinsic_base(instr) + nir_src_as_uint(*offset);
   assert((base & 3) == 0 && "unaligned push constants");

-   unsigned bits = instr->dest.ssa.bit_size * instr->dest.ssa.num_components;
+   unsigned bits = instr->def.bit_size * instr->def.num_components;
   unsigned n = DIV_ROUND_UP(bits, 32);
   assert(n <= 4);
@@ -1078,7 +1077,7 @@ bi_emit_load_push_constant(bi_builder *b, nir_intrinsic_instr *instr)
      channels[i] = bi_fau(BIR_FAU_UNIFORM | (word >> 1), word & 1);
   }

-   bi_emit_collect_to(b, bi_def_index(&instr->dest.ssa), channels, n);
+   bi_emit_collect_to(b, bi_def_index(&instr->def), channels, n);
 }

 static bi_index
@@ -1123,8 +1122,8 @@ static void
 bi_emit_load(bi_builder *b, nir_intrinsic_instr *instr, enum bi_seg seg)
 {
   int16_t offset = 0;
-   unsigned bits = instr->num_components * instr->dest.ssa.bit_size;
-   bi_index dest = bi_def_index(&instr->dest.ssa);
+   unsigned bits = instr->num_components * instr->def.bit_size;
+   bi_index dest = bi_def_index(&instr->def);
   bi_index addr_lo = bi_extract(b, bi_src_index(&instr->src[0]), 0);
   bi_index addr_hi = bi_addr_high(b, &instr->src[0]);
@@ -1334,7 +1333,7 @@ bi_emit_image_load(bi_builder *b, nir_intrinsic_instr *instr)
   bi_index coords = bi_src_index(&instr->src[1]);
   bi_index xy = bi_emit_image_coord(b, coords, 0, coord_comps, array);
   bi_index zw = bi_emit_image_coord(b, coords, 1, coord_comps, array);
-   bi_index dest = bi_def_index(&instr->dest.ssa);
+   bi_index dest = bi_def_index(&instr->def);
   enum bi_register_format regfmt =
      bi_reg_fmt_for_nir(nir_intrinsic_dest_type(instr));
   enum bi_vecsize vecsize = instr->num_components - 1;
@@ -1354,7 +1353,7 @@ bi_emit_image_load(bi_builder *b, nir_intrinsic_instr *instr)
                     vecsize);
   }

-   bi_split_def(b, &instr->dest.ssa);
+   bi_split_def(b, &instr->def);
 }

 static void
@@ -1467,10 +1466,10 @@ bi_emit_load_frag_coord_zw(bi_builder *b, bi_index dst, unsigned channel)
 static void
 bi_emit_ld_tile(bi_builder *b, nir_intrinsic_instr *instr)
 {
-   bi_index dest = bi_def_index(&instr->dest.ssa);
+   bi_index dest = bi_def_index(&instr->def);
   nir_alu_type T = nir_intrinsic_dest_type(instr);
   enum bi_register_format regfmt = bi_reg_fmt_for_nir(T);
-   unsigned size = instr->dest.ssa.bit_size;
+   unsigned size = instr->def.bit_size;
   unsigned nr = instr->num_components;

   /* Get the render target */
@@ -1488,7 +1487,7 @@ static void
 bi_emit_intrinsic(bi_builder *b, nir_intrinsic_instr *instr)
 {
   bi_index dst = nir_intrinsic_infos[instr->intrinsic].has_dest
-                     ? bi_def_index(&instr->dest.ssa)
+                     ? bi_def_index(&instr->def)
                     : bi_null();

   gl_shader_stage stage = b->shader->stage;
@@ -1594,7 +1593,7 @@ bi_emit_intrinsic(bi_builder *b, nir_intrinsic_instr *instr)
         bi_emit_atomic_i32_to(b, dst, addr, bi_src_index(&instr->src[1]), op);
      }

-      bi_split_def(b, &instr->dest.ssa);
+      bi_split_def(b, &instr->def);
      break;
   }
@@ -1611,7 +1610,7 @@ bi_emit_intrinsic(bi_builder *b, nir_intrinsic_instr *instr)
                             bi_src_index(&instr->src[1]), op);
      }

-      bi_split_def(b, &instr->dest.ssa);
+      bi_split_def(b, &instr->def);
      break;
   }
@@ -1630,13 +1629,13 @@ bi_emit_intrinsic(bi_builder *b, nir_intrinsic_instr *instr)
   case nir_intrinsic_global_atomic_swap:
      bi_emit_acmpxchg_to(b, dst, bi_src_index(&instr->src[0]), &instr->src[1],
                          &instr->src[2], BI_SEG_NONE);
-      bi_split_def(b, &instr->dest.ssa);
+      bi_split_def(b, &instr->def);
      break;

   case nir_intrinsic_shared_atomic_swap:
      bi_emit_acmpxchg_to(b, dst, bi_src_index(&instr->src[0]), &instr->src[1],
                          &instr->src[2], BI_SEG_WLS);
-      bi_split_def(b, &instr->dest.ssa);
+      bi_split_def(b, &instr->def);
      break;

   case nir_intrinsic_load_pixel_coord:
@@ -1731,7 +1730,7 @@ bi_emit_intrinsic(bi_builder *b, nir_intrinsic_instr *instr)
   case nir_intrinsic_shader_clock:
      bi_ld_gclk_u64_to(b, dst, BI_SOURCE_CYCLE_COUNTER);
-      bi_split_def(b, &instr->dest.ssa);
+      bi_split_def(b, &instr->def);
      break;

   default:
@@ -3244,7 +3243,7 @@ bi_emit_texc(bi_builder *b, nir_tex_instr *instr)
      .shadow_or_clamp_disable = instr->is_shadow,
      .array = instr->is_array,
      .dimension = bifrost_tex_format(instr->sampler_dim),
-      .format = bi_texture_format(instr->dest_type | instr->dest.ssa.bit_size,
+      .format = bi_texture_format(instr->dest_type | instr->def.bit_size,
                                  BI_CLAMP_NONE), /* TODO */
      .mask = 0xF,
   };
@@ -3408,7 +3407,7 @@ bi_emit_texc(bi_builder *b, nir_tex_instr *instr)
         dregs[sr_count++] = dregs[i];
   }

-   unsigned res_size = instr->dest.ssa.bit_size == 16 ? 2 : 4;
+   unsigned res_size = instr->def.bit_size == 16 ? 2 : 4;

   bi_index sr = sr_count ? bi_temp(b->shader) : bi_null();
   bi_index dst = bi_temp(b->shader);
@@ -3425,9 +3424,9 @@ bi_emit_texc(bi_builder *b, nir_tex_instr *instr)
   bi_index w[4] = {bi_null(), bi_null(), bi_null(), bi_null()};
   bi_emit_split_i32(b, w, dst, res_size);
-   bi_emit_collect_to(
-      b, bi_def_index(&instr->dest.ssa), w,
-      DIV_ROUND_UP(instr->dest.ssa.num_components * res_size, 4));
+   bi_emit_collect_to(b, bi_def_index(&instr->def), w,
+                      DIV_ROUND_UP(instr->def.num_components * res_size, 4));
 }

 /* Staging registers required by texturing in the order they appear (Valhall) */
@@ -3562,8 +3560,8 @@ bi_emit_tex_valhall(bi_builder *b, nir_tex_instr *instr)
      image_src = bi_lshift_or_i32(b, texture, image_src, bi_imm_u8(16));

   /* Only write the components that we actually read */
-   unsigned mask = nir_def_components_read(&instr->dest.ssa);
-   unsigned comps_per_reg = instr->dest.ssa.bit_size == 16 ? 2 : 1;
+   unsigned mask = nir_def_components_read(&instr->def);
+   unsigned comps_per_reg = instr->def.bit_size == 16 ? 2 : 1;
   unsigned res_size = DIV_ROUND_UP(util_bitcount(mask), comps_per_reg);

   enum bi_register_format regfmt = bi_reg_fmt_for_nir(instr->dest_type);
@@ -3604,7 +3602,7 @@ bi_emit_tex_valhall(bi_builder *b, nir_tex_instr *instr)
   /* Index into the packed component array */
   unsigned j = 0;
   unsigned comps[4] = {0};
-   unsigned nr_components = instr->dest.ssa.num_components;
+   unsigned nr_components = instr->def.num_components;

   for (unsigned i = 0; i < nr_components; ++i) {
      if (mask & BITFIELD_BIT(i)) {
@@ -3615,8 +3613,8 @@ bi_emit_tex_valhall(bi_builder *b, nir_tex_instr *instr)
      }
   }

-   bi_make_vec_to(b, bi_def_index(&instr->dest.ssa), unpacked, comps,
-                  instr->dest.ssa.num_components, instr->dest.ssa.bit_size);
+   bi_make_vec_to(b, bi_def_index(&instr->def), unpacked, comps,
+                  instr->def.num_components, instr->def.bit_size);
 }

 /* Simple textures ops correspond to NIR tex or txl with LOD = 0 on 2D/cube
@@ -3634,17 +3632,16 @@ bi_emit_texs(bi_builder *b, nir_tex_instr *instr)
      bi_index face, s, t;
      bi_emit_cube_coord(b, coords, &face, &s, &t);

-      bi_texs_cube_to(b, instr->dest.ssa.bit_size,
-                      bi_def_index(&instr->dest.ssa), s, t, face,
-                      instr->sampler_index, instr->texture_index);
+      bi_texs_cube_to(b, instr->def.bit_size, bi_def_index(&instr->def), s, t,
+                      face, instr->sampler_index, instr->texture_index);
   } else {
-      bi_texs_2d_to(b, instr->dest.ssa.bit_size, bi_def_index(&instr->dest.ssa),
+      bi_texs_2d_to(b, instr->def.bit_size, bi_def_index(&instr->def),
                   bi_extract(b, coords, 0), bi_extract(b, coords, 1),
                   instr->op != nir_texop_tex, /* zero LOD */
                   instr->sampler_index, instr->texture_index);
   }

-   bi_split_def(b, &instr->dest.ssa);
+   bi_split_def(b, &instr->def);
 }

 static bool
@@ -3718,7 +3715,7 @@ static void
 bi_emit_phi(bi_builder *b, nir_phi_instr *instr)
 {
   unsigned nr_srcs = exec_list_length(&instr->srcs);
-   bi_instr *I = bi_phi_to(b, bi_def_index(&instr->dest.ssa), nr_srcs);
+   bi_instr *I = bi_phi_to(b, bi_def_index(&instr->def), nr_srcs);

   /* Deferred */
   I->phi = instr;
@@ -3739,7 +3736,7 @@ bi_emit_phi_deferred(bi_context *ctx, bi_block *block, bi_instr *I)
   nir_phi_instr *phi = I->phi;

   /* Guaranteed by lower_phis_to_scalar */
-   assert(phi->dest.ssa.num_components == 1);
+   assert(phi->def.num_components == 1);

   nir_foreach_phi_src(src, phi) {
      bi_block *pred = bi_from_nir_block(ctx, src->pred);
@@ -4623,11 +4620,11 @@ bi_lower_load_output(nir_builder *b, nir_instr *instr, UNUSED void *data)
      b, .base = rt, .src_type = nir_intrinsic_dest_type(intr));

   nir_def *lowered = nir_load_converted_output_pan(
-      b, intr->dest.ssa.num_components, intr->dest.ssa.bit_size, conversion,
+      b, intr->def.num_components, intr->def.bit_size, conversion,
      .dest_type = nir_intrinsic_dest_type(intr),
      .io_semantics = nir_intrinsic_io_semantics(intr));

-   nir_def_rewrite_uses(&intr->dest.ssa, lowered);
+   nir_def_rewrite_uses(&intr->def, lowered);
   return true;
 }
diff --git a/src/panfrost/lib/pan_blend.c b/src/panfrost/lib/pan_blend.c
index 4b858ca..adb8a97 100644
--- a/src/panfrost/lib/pan_blend.c
+++ b/src/panfrost/lib/pan_blend.c
@@ -621,7 +621,7 @@ pan_inline_blend_constants(nir_builder *b, nir_instr *instr, void *data)
   b->cursor = nir_after_instr(instr);
   nir_def *constant = nir_build_imm(b, 4, 32, constants);
-   nir_def_rewrite_uses(&intr->dest.ssa, constant);
+   nir_def_rewrite_uses(&intr->def, constant);
   nir_instr_remove(instr);
   return true;
 }
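pan_inline_blend_constants replaces the blend-constant load with a compile-time immediate built from four values passed through *data. In isolation it looks roughly like this (the matched intrinsic name is an assumption):

   static bool
   inline_blend_constants(nir_builder *b, nir_instr *instr, void *data)
   {
      if (instr->type != nir_instr_type_intrinsic)
         return false;
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
      if (intr->intrinsic != nir_intrinsic_load_blend_const_color_rgba)
         return false;

      const nir_const_value *constants = data;

      b->cursor = nir_after_instr(instr);
      nir_def *constant = nir_build_imm(b, 4, 32, constants);
      nir_def_rewrite_uses(&intr->def, constant);
      nir_instr_remove(instr);
      return true;
   }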
nir_def_rewrite_uses(&intr->dest.ssa, nir_imm_int(b, conversion >> 32)); + nir_def_rewrite_uses(&intr->def, nir_imm_int(b, conversion >> 32)); return true; } diff --git a/src/panfrost/lib/pan_blitter.c b/src/panfrost/lib/pan_blitter.c index 5be9f51..1546c9e 100644 --- a/src/panfrost/lib/pan_blitter.c +++ b/src/panfrost/lib/pan_blitter.c @@ -402,7 +402,7 @@ lower_sampler_parameters(nir_builder *b, nir_instr *instr, UNUSED void *data) }; b->cursor = nir_after_instr(instr); - nir_def_rewrite_uses(&intr->dest.ssa, nir_build_imm(b, 3, 32, constants)); + nir_def_rewrite_uses(&intr->def, nir_build_imm(b, 3, 32, constants)); return true; } @@ -557,10 +557,10 @@ pan_blitter_get_blit_shader(struct panfrost_device *dev, tex->src[2] = nir_tex_src_for_ssa(nir_tex_src_lod, nir_imm_int(&b, 0)); - nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32); + nir_def_init(&tex->instr, &tex->def, 4, 32); nir_builder_instr_insert(&b, &tex->instr); - res = res ? nir_fadd(&b, res, &tex->dest.ssa) : &tex->dest.ssa; + res = res ? nir_fadd(&b, res, &tex->def) : &tex->def; } if (base_type == nir_type_float) @@ -592,9 +592,9 @@ pan_blitter_get_blit_shader(struct panfrost_device *dev, tex->coord_components = coord_comps; } - nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32); + nir_def_init(&tex->instr, &tex->def, 4, 32); nir_builder_instr_insert(&b, &tex->instr); - res = &tex->dest.ssa; + res = &tex->def; } assert(res); diff --git a/src/panfrost/midgard/midgard_compile.c b/src/panfrost/midgard/midgard_compile.c index 50c8010..20add51 100644 --- a/src/panfrost/midgard/midgard_compile.c +++ b/src/panfrost/midgard/midgard_compile.c @@ -223,8 +223,8 @@ midgard_nir_lower_global_load_instr(nir_builder *b, nir_instr *instr, intr->intrinsic != nir_intrinsic_load_shared) return false; - unsigned compsz = intr->dest.ssa.bit_size; - unsigned totalsz = compsz * intr->dest.ssa.num_components; + unsigned compsz = intr->def.bit_size; + unsigned totalsz = compsz * intr->def.num_components; /* 8, 16, 32, 64 and 128 bit loads don't need to be lowered */ if (util_bitcount(totalsz) < 2 && totalsz <= 128) return false; @@ -251,10 +251,10 @@ midgard_nir_lower_global_load_instr(nir_builder *b, nir_instr *instr, shared_load->src[0] = nir_src_for_ssa(addr); nir_intrinsic_set_align(shared_load, compsz / 8, 0); nir_intrinsic_set_base(shared_load, nir_intrinsic_base(intr)); - nir_def_init(&shared_load->instr, &shared_load->dest.ssa, + nir_def_init(&shared_load->instr, &shared_load->def, shared_load->num_components, compsz); nir_builder_instr_insert(b, &shared_load->instr); - load = &shared_load->dest.ssa; + load = &shared_load->def; } for (unsigned i = 0; i < loadncomps; i++) @@ -264,8 +264,8 @@ midgard_nir_lower_global_load_instr(nir_builder *b, nir_instr *instr, addr = nir_iadd_imm(b, addr, loadsz / 8); } - assert(ncomps == intr->dest.ssa.num_components); - nir_def_rewrite_uses(&intr->dest.ssa, nir_vec(b, comps, ncomps)); + assert(ncomps == intr->def.num_components); + nir_def_rewrite_uses(&intr->def, nir_vec(b, comps, ncomps)); return true; } @@ -991,7 +991,7 @@ mir_set_intr_mask(nir_instr *instr, midgard_instruction *ins, bool is_read) nir_mask = mask_of(nir_intrinsic_dest_components(intr)); /* Extension is mandatory for 8/16-bit loads */ - dsize = intr->dest.ssa.bit_size == 64 ? 64 : 32; + dsize = intr->def.bit_size == 64 ? 64 : 32; } else { nir_mask = nir_intrinsic_write_mask(intr); dsize = OP_IS_COMMON_STORE(ins->op) ? 
nir_src_bit_size(intr->src[0]) : 32; @@ -1014,7 +1014,7 @@ emit_ubo_read(compiler_context *ctx, nir_instr *instr, unsigned dest, midgard_instruction ins; unsigned dest_size = (instr->type == nir_instr_type_intrinsic) - ? nir_instr_as_intrinsic(instr)->dest.ssa.bit_size + ? nir_instr_as_intrinsic(instr)->def.bit_size : 32; unsigned bitsize = dest_size * nr_comps; @@ -1070,8 +1070,7 @@ emit_global(compiler_context *ctx, nir_instr *instr, bool is_read, nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr); if (is_read) { - unsigned bitsize = - intr->dest.ssa.bit_size * intr->dest.ssa.num_components; + unsigned bitsize = intr->def.bit_size * intr->def.num_components; switch (bitsize) { case 8: @@ -1098,7 +1097,7 @@ emit_global(compiler_context *ctx, nir_instr *instr, bool is_read, /* For anything not aligned on 32bit, make sure we write full * 32 bits registers. */ if (bitsize & 31) { - unsigned comps_per_32b = 32 / intr->dest.ssa.bit_size; + unsigned comps_per_32b = 32 / intr->def.bit_size; for (unsigned c = 0; c < 4 * comps_per_32b; c += comps_per_32b) { if (!(ins.mask & BITFIELD_RANGE(c, comps_per_32b))) @@ -1192,7 +1191,7 @@ emit_atomic(compiler_context *ctx, nir_intrinsic_instr *instr) bool is_shared = (instr->intrinsic == nir_intrinsic_shared_atomic) || (instr->intrinsic == nir_intrinsic_shared_atomic_swap); - unsigned dest = nir_def_index(&instr->dest.ssa); + unsigned dest = nir_def_index(&instr->def); unsigned val = nir_src_index(ctx, &instr->src[1]); unsigned bitsize = nir_src_bit_size(instr->src[1]); emit_explicit_constant(ctx, val); @@ -1323,12 +1322,12 @@ emit_image_op(compiler_context *ctx, nir_intrinsic_instr *instr) nir_alu_type base_type = nir_alu_type_get_base_type(type); ins.src_types[0] = base_type | nir_src_bit_size(instr->src[3]); } else if (instr->intrinsic == nir_intrinsic_image_texel_address) { - ins = m_lea_image(nir_def_index(&instr->dest.ssa), - PACK_LDST_ATTRIB_OFS(address)); + ins = + m_lea_image(nir_def_index(&instr->def), PACK_LDST_ATTRIB_OFS(address)); ins.mask = mask_of(2); /* 64-bit memory address */ } else { /* emit ld_image_* */ nir_alu_type type = nir_intrinsic_dest_type(instr); - ins = ld_image(type, nir_def_index(&instr->dest.ssa), + ins = ld_image(type, nir_def_index(&instr->def), PACK_LDST_ATTRIB_OFS(address)); ins.mask = mask_of(nir_intrinsic_dest_components(instr)); ins.dest_type = type; @@ -1456,7 +1455,7 @@ emit_fragment_store(compiler_context *ctx, unsigned src, unsigned src_z, static void emit_compute_builtin(compiler_context *ctx, nir_intrinsic_instr *instr) { - unsigned reg = nir_def_index(&instr->dest.ssa); + unsigned reg = nir_def_index(&instr->def); midgard_instruction ins = m_ldst_mov(reg, 0); ins.mask = mask_of(3); ins.swizzle[0][3] = COMPONENT_X; /* xyzx */ @@ -1480,7 +1479,7 @@ vertex_builtin_arg(nir_intrinsic_op op) static void emit_vertex_builtin(compiler_context *ctx, nir_intrinsic_instr *instr) { - unsigned reg = nir_def_index(&instr->dest.ssa); + unsigned reg = nir_def_index(&instr->def); emit_attr_read(ctx, reg, vertex_builtin_arg(instr->intrinsic), 1, nir_type_int); } @@ -1488,7 +1487,7 @@ emit_vertex_builtin(compiler_context *ctx, nir_intrinsic_instr *instr) static void emit_special(compiler_context *ctx, nir_intrinsic_instr *instr, unsigned idx) { - unsigned reg = nir_def_index(&instr->dest.ssa); + unsigned reg = nir_def_index(&instr->def); midgard_instruction ld = m_ld_tilebuffer_raw(reg, 0); ld.op = midgard_op_ld_special_32u; @@ -1551,12 +1550,11 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr) nir_def 
*handle = instr->src[0].ssa; midgard_instruction ins = - v_mov(nir_reg_index(handle), nir_def_index(&instr->dest.ssa)); + v_mov(nir_reg_index(handle), nir_def_index(&instr->def)); - ins.dest_type = ins.src_types[1] = - nir_type_uint | instr->dest.ssa.bit_size; + ins.dest_type = ins.src_types[1] = nir_type_uint | instr->def.bit_size; - ins.mask = BITFIELD_MASK(instr->dest.ssa.num_components); + ins.mask = BITFIELD_MASK(instr->def.num_components); emit_mir_instruction(ctx, ins); break; } @@ -1625,7 +1623,7 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr) /* We may need to apply a fractional offset */ int component = (is_flat || is_interp) ? nir_intrinsic_component(instr) : 0; - reg = nir_def_index(&instr->dest.ssa); + reg = nir_def_index(&instr->def); if (is_ubo) { nir_src index = instr->src[0]; @@ -1642,8 +1640,7 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr) emit_global(ctx, &instr->instr, true, reg, src_offset, seg); } else if (ctx->stage == MESA_SHADER_FRAGMENT && !ctx->inputs->is_blend) { emit_varying_read(ctx, reg, offset, nr_comp, component, - indirect_offset, t | instr->dest.ssa.bit_size, - is_flat); + indirect_offset, t | instr->def.bit_size, is_flat); } else if (ctx->inputs->is_blend) { /* ctx->blend_input will be precoloured to r0/r2, where * the input is preloaded */ @@ -1672,7 +1669,7 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr) /* Reads 128-bit value raw off the tilebuffer during blending, tasty */ case nir_intrinsic_load_raw_output_pan: { - reg = nir_def_index(&instr->dest.ssa); + reg = nir_def_index(&instr->def); /* T720 and below use different blend opcodes with slightly * different semantics than T760 and up */ @@ -1705,9 +1702,9 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr) } case nir_intrinsic_load_output: { - reg = nir_def_index(&instr->dest.ssa); + reg = nir_def_index(&instr->def); - unsigned bits = instr->dest.ssa.bit_size; + unsigned bits = instr->def.bit_size; midgard_instruction ld; if (bits == 16) @@ -2160,7 +2157,7 @@ emit_texop_native(compiler_context *ctx, nir_tex_instr *instr, midgard_instruction ins = { .type = TAG_TEXTURE_4, .mask = 0xF, - .dest = nir_def_index(&instr->dest.ssa), + .dest = nir_def_index(&instr->def), .src = {~0, ~0, ~0, ~0}, .dest_type = instr->dest_type, .swizzle = SWIZZLE_IDENTITY_4, diff --git a/src/panfrost/midgard/midgard_errata_lod.c b/src/panfrost/midgard/midgard_errata_lod.c index 6540519..6a4e4ab 100644 --- a/src/panfrost/midgard/midgard_errata_lod.c +++ b/src/panfrost/midgard/midgard_errata_lod.c @@ -49,14 +49,14 @@ nir_lod_errata_instr(nir_builder *b, nir_instr *instr, void *data) nir_intrinsic_instr *l = nir_intrinsic_instr_create( b->shader, nir_intrinsic_load_sampler_lod_parameters_pan); l->num_components = 3; - nir_def_init(&l->instr, &l->dest.ssa, 3, 32); + nir_def_init(&l->instr, &l->def, 3, 32); /* TODO: Indirect samplers, separate sampler objects XXX */ nir_src idx = nir_src_for_ssa(nir_imm_int(b, tex->texture_index)); nir_src_copy(&l->src[0], &idx, &l->instr); nir_builder_instr_insert(b, &l->instr); - nir_def *params = &l->dest.ssa; + nir_def *params = &l->def; /* Extract the individual components */ nir_def *min_lod = nir_channel(b, params, 0); diff --git a/src/panfrost/midgard/nir_fuse_io_16.c b/src/panfrost/midgard/nir_fuse_io_16.c index 4925bb8..c4fe2f5 100644 --- a/src/panfrost/midgard/nir_fuse_io_16.c +++ b/src/panfrost/midgard/nir_fuse_io_16.c @@ -60,7 +60,7 @@ nir_fuse_io_16(nir_shader *shader) if (intr->intrinsic != 
nir_intrinsic_load_interpolated_input) continue; - if (intr->dest.ssa.bit_size != 32) + if (intr->def.bit_size != 32) continue; /* We swizzle at a 32-bit level so need a multiple of 2. We could @@ -70,20 +70,19 @@ nir_fuse_io_16(nir_shader *shader) bool valid = true; - nir_foreach_use_including_if(src, &intr->dest.ssa) + nir_foreach_use_including_if(src, &intr->def) valid &= !src->is_if && nir_src_is_f2fmp(src); if (!valid) continue; - intr->dest.ssa.bit_size = 16; + intr->def.bit_size = 16; nir_builder b = nir_builder_at(nir_after_instr(instr)); /* The f2f32(f2fmp(x)) will cancel by opt_algebraic */ - nir_def *conv = nir_f2f32(&b, &intr->dest.ssa); - nir_def_rewrite_uses_after(&intr->dest.ssa, conv, - conv->parent_instr); + nir_def *conv = nir_f2f32(&b, &intr->def); + nir_def_rewrite_uses_after(&intr->def, conv, conv->parent_instr); progress |= true; } diff --git a/src/panfrost/util/pan_collect_varyings.c b/src/panfrost/util/pan_collect_varyings.c index 597cbf9..b5cc72c 100644 --- a/src/panfrost/util/pan_collect_varyings.c +++ b/src/panfrost/util/pan_collect_varyings.c @@ -92,7 +92,7 @@ walk_varyings(UNUSED nir_builder *b, nir_instr *instr, void *data) if (b->shader->info.stage != MESA_SHADER_FRAGMENT) return false; - count = intr->dest.ssa.num_components; + count = intr->def.num_components; break; default: diff --git a/src/panfrost/util/pan_lower_64bit_intrin.c b/src/panfrost/util/pan_lower_64bit_intrin.c index 72682dd..ef4855a 100644 --- a/src/panfrost/util/pan_lower_64bit_intrin.c +++ b/src/panfrost/util/pan_lower_64bit_intrin.c @@ -52,16 +52,16 @@ nir_lower_64bit_intrin_instr(nir_builder *b, nir_instr *instr, void *data) return false; } - if (intr->dest.ssa.bit_size != 64) + if (intr->def.bit_size != 64) return false; b->cursor = nir_after_instr(instr); - intr->dest.ssa.bit_size = 32; + intr->def.bit_size = 32; - nir_def *conv = nir_u2u64(b, &intr->dest.ssa); + nir_def *conv = nir_u2u64(b, &intr->def); - nir_def_rewrite_uses_after(&intr->dest.ssa, conv, conv->parent_instr); + nir_def_rewrite_uses_after(&intr->def, conv, conv->parent_instr); return true; } diff --git a/src/panfrost/util/pan_lower_framebuffer.c b/src/panfrost/util/pan_lower_framebuffer.c index 8436586..9bd2490 100644 --- a/src/panfrost/util/pan_lower_framebuffer.c +++ b/src/panfrost/util/pan_lower_framebuffer.c @@ -547,19 +547,19 @@ pan_lower_fb_load(nir_builder *b, nir_intrinsic_instr *intr, * the result is undefined. 
*/ - unsigned bits = intr->dest.ssa.bit_size; + unsigned bits = intr->def.bit_size; nir_alu_type src_type = nir_alu_type_get_base_type(pan_unpacked_type_for_format(desc)); unpacked = nir_convert_to_bit_size(b, unpacked, src_type, bits); - unpacked = nir_resize_vector(b, unpacked, intr->dest.ssa.num_components); + unpacked = nir_resize_vector(b, unpacked, intr->def.num_components); /* Reorder the components */ if (reorder_comps) unpacked = pan_unpack_reorder(b, desc, unpacked); - nir_def_rewrite_uses_after(&intr->dest.ssa, unpacked, &intr->instr); + nir_def_rewrite_uses_after(&intr->def, unpacked, &intr->instr); } struct inputs { diff --git a/src/panfrost/util/pan_lower_helper_invocation.c b/src/panfrost/util/pan_lower_helper_invocation.c index 6cae500..11e1ae0 100644 --- a/src/panfrost/util/pan_lower_helper_invocation.c +++ b/src/panfrost/util/pan_lower_helper_invocation.c @@ -43,7 +43,7 @@ pan_lower_helper_invocation_instr(nir_builder *b, nir_instr *instr, void *data) nir_def *mask = nir_load_sample_mask_in(b); nir_def *eq = nir_ieq_imm(b, mask, 0); - nir_def_rewrite_uses(&intr->dest.ssa, eq); + nir_def_rewrite_uses(&intr->def, eq); return true; } diff --git a/src/panfrost/util/pan_lower_sample_position.c b/src/panfrost/util/pan_lower_sample_position.c index 531e98c..9ba014d 100644 --- a/src/panfrost/util/pan_lower_sample_position.c +++ b/src/panfrost/util/pan_lower_sample_position.c @@ -55,10 +55,10 @@ pan_lower_sample_pos_impl(struct nir_builder *b, nir_instr *instr, nir_def *decoded = nir_fmul_imm(b, nir_i2f16(b, raw), 1.0 / 256.0); /* Make NIR validator happy */ - if (decoded->bit_size != intr->dest.ssa.bit_size) - decoded = nir_f2fN(b, decoded, intr->dest.ssa.bit_size); + if (decoded->bit_size != intr->def.bit_size) + decoded = nir_f2fN(b, decoded, intr->def.bit_size); - nir_def_rewrite_uses(&intr->dest.ssa, decoded); + nir_def_rewrite_uses(&intr->def, decoded); return true; } diff --git a/src/panfrost/util/pan_lower_xfb.c b/src/panfrost/util/pan_lower_xfb.c index 5bb00b3..212c229 100644 --- a/src/panfrost/util/pan_lower_xfb.c +++ b/src/panfrost/util/pan_lower_xfb.c @@ -74,7 +74,7 @@ lower_xfb(nir_builder *b, nir_instr *instr, UNUSED void *data) nir_def *repl = nir_iadd(b, nir_load_vertex_id_zero_base(b), nir_load_first_vertex(b)); - nir_def_rewrite_uses(&intr->dest.ssa, repl); + nir_def_rewrite_uses(&intr->def, repl); return true; } diff --git a/src/panfrost/vulkan/panvk_vX_meta_copy.c b/src/panfrost/vulkan/panvk_vX_meta_copy.c index 1d2e6ce..b283539 100644 --- a/src/panfrost/vulkan/panvk_vX_meta_copy.c +++ b/src/panfrost/vulkan/panvk_vX_meta_copy.c @@ -336,11 +336,11 @@ panvk_meta_copy_img2img_shader(struct panfrost_device *pdev, nir_tex_src_for_ssa(nir_tex_src_ms_index, nir_load_sample_id(&b)); } - nir_def_init(&tex->instr, &tex->dest.ssa, 4, + nir_def_init(&tex->instr, &tex->def, 4, nir_alu_type_get_type_size(tex->dest_type)); nir_builder_instr_insert(&b, &tex->instr); - nir_def *texel = &tex->dest.ssa; + nir_def *texel = &tex->def; unsigned dstcompsz = util_format_get_component_bits(dstfmt, UTIL_FORMAT_COLORSPACE_RGB, 0); @@ -1361,11 +1361,11 @@ panvk_meta_copy_img2buf_shader(struct panfrost_device *pdev, tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_coord, imgcoords); tex->coord_components = texdim + texisarray; - nir_def_init(&tex->instr, &tex->dest.ssa, 4, + nir_def_init(&tex->instr, &tex->def, 4, nir_alu_type_get_type_size(tex->dest_type)); nir_builder_instr_insert(&b, &tex->instr); - nir_def *texel = &tex->dest.ssa; + nir_def *texel = &tex->def; unsigned fullmask = (1 << 
util_format_get_nr_components(key.imgfmt)) - 1; unsigned nbufcomps = util_bitcount(fullmask); diff --git a/src/panfrost/vulkan/panvk_vX_nir_lower_descriptors.c b/src/panfrost/vulkan/panvk_vX_nir_lower_descriptors.c index ff59938..d7a4302 100644 --- a/src/panfrost/vulkan/panvk_vX_nir_lower_descriptors.c +++ b/src/panfrost/vulkan/panvk_vX_nir_lower_descriptors.c @@ -273,9 +273,9 @@ lower_res_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin, unreachable("Unhandled resource intrinsic"); } - assert(intrin->dest.ssa.bit_size == res->bit_size); - assert(intrin->dest.ssa.num_components == res->num_components); - nir_def_rewrite_uses(&intrin->dest.ssa, res); + assert(intrin->def.bit_size == res->bit_size); + assert(intrin->def.num_components == res->num_components); + nir_def_rewrite_uses(&intrin->def, res); nir_instr_remove(&intrin->instr); return true; @@ -398,21 +398,21 @@ lower_tex(nir_builder *b, nir_tex_instr *tex, switch (tex->op) { case nir_texop_txs: res = nir_channels(b, load_tex_img_size(b, deref, dim, ctx), - nir_component_mask(tex->dest.ssa.num_components)); + nir_component_mask(tex->def.num_components)); break; case nir_texop_query_levels: - assert(tex->dest.ssa.num_components == 1); + assert(tex->def.num_components == 1); res = load_tex_img_levels(b, deref, dim, ctx); break; case nir_texop_texture_samples: - assert(tex->dest.ssa.num_components == 1); + assert(tex->def.num_components == 1); res = load_tex_img_samples(b, deref, dim, ctx); break; default: unreachable("Unsupported texture query op"); } - nir_def_rewrite_uses(&tex->dest.ssa, res); + nir_def_rewrite_uses(&tex->def, res); nir_instr_remove(&tex->instr); return true; } @@ -505,7 +505,7 @@ lower_img_intrinsic(nir_builder *b, nir_intrinsic_instr *intr, switch (intr->intrinsic) { case nir_intrinsic_image_deref_size: res = nir_channels(b, load_tex_img_size(b, deref, dim, ctx), - nir_component_mask(intr->dest.ssa.num_components)); + nir_component_mask(intr->def.num_components)); break; case nir_intrinsic_image_deref_samples: res = load_tex_img_samples(b, deref, dim, ctx); @@ -514,7 +514,7 @@ lower_img_intrinsic(nir_builder *b, nir_intrinsic_instr *intr, unreachable("Unsupported image query op"); } - nir_def_rewrite_uses(&intr->dest.ssa, res); + nir_def_rewrite_uses(&intr->def, res); nir_instr_remove(&intr->instr); } else { nir_rewrite_image_intrinsic(intr, get_img_index(b, deref, ctx), false); diff --git a/src/panfrost/vulkan/panvk_vX_shader.c b/src/panfrost/vulkan/panvk_vX_shader.c index c8f8501..cc0241e 100644 --- a/src/panfrost/vulkan/panvk_vX_shader.c +++ b/src/panfrost/vulkan/panvk_vX_shader.c @@ -48,11 +48,11 @@ static nir_def * load_sysval_from_ubo(nir_builder *b, nir_intrinsic_instr *intr, unsigned offset) { - return nir_load_ubo( - b, intr->dest.ssa.num_components, intr->dest.ssa.bit_size, - nir_imm_int(b, PANVK_SYSVAL_UBO_INDEX), nir_imm_int(b, offset), - .align_mul = intr->dest.ssa.bit_size / 8, .align_offset = 0, - .range_base = offset, .range = intr->dest.ssa.bit_size / 8); + return nir_load_ubo(b, intr->def.num_components, intr->def.bit_size, + nir_imm_int(b, PANVK_SYSVAL_UBO_INDEX), + nir_imm_int(b, offset), + .align_mul = intr->def.bit_size / 8, .align_offset = 0, + .range_base = offset, .range = intr->def.bit_size / 8); } struct sysval_options { @@ -116,7 +116,7 @@ panvk_lower_sysvals(nir_builder *b, nir_instr *instr, void *data) #undef SYSVAL b->cursor = nir_after_instr(instr); - nir_def_rewrite_uses(&intr->dest.ssa, val); + nir_def_rewrite_uses(&intr->def, val); return true; } @@ -190,12 +190,12 @@ 
panvk_lower_load_push_constant(nir_builder *b, nir_instr *instr, void *data) b->cursor = nir_before_instr(instr); nir_def *ubo_load = - nir_load_ubo(b, intr->dest.ssa.num_components, intr->dest.ssa.bit_size, + nir_load_ubo(b, intr->def.num_components, intr->def.bit_size, nir_imm_int(b, PANVK_PUSH_CONST_UBO_INDEX), intr->src[0].ssa, - .align_mul = intr->dest.ssa.bit_size / 8, .align_offset = 0, + .align_mul = intr->def.bit_size / 8, .align_offset = 0, .range_base = nir_intrinsic_base(intr), .range = nir_intrinsic_range(intr)); - nir_def_rewrite_uses(&intr->dest.ssa, ubo_load); + nir_def_rewrite_uses(&intr->def, ubo_load); nir_instr_remove(instr); return true; } diff --git a/src/vulkan/runtime/vk_nir_convert_ycbcr.c b/src/vulkan/runtime/vk_nir_convert_ycbcr.c index 85dfc95..f7bf29d 100644 --- a/src/vulkan/runtime/vk_nir_convert_ycbcr.c +++ b/src/vulkan/runtime/vk_nir_convert_ycbcr.c @@ -173,12 +173,12 @@ get_texture_size(struct ycbcr_state *state, nir_deref_instr *texture) tex->dest_type = nir_type_int32; tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_deref, - &texture->dest.ssa); + &texture->def); - nir_def_init(&tex->instr, &tex->dest.ssa, nir_tex_instr_dest_size(tex), 32); + nir_def_init(&tex->instr, &tex->def, nir_tex_instr_dest_size(tex), 32); nir_builder_instr_insert(b, &tex->instr); - state->image_size = nir_i2f32(b, &tex->dest.ssa); + state->image_size = nir_i2f32(b, &tex->def); return state->image_size; } @@ -270,11 +270,11 @@ create_plane_tex_instr_implicit(struct ycbcr_state *state, tex->sampler_index = old_tex->sampler_index; tex->is_array = old_tex->is_array; - nir_def_init(&tex->instr, &tex->dest.ssa, old_tex->dest.ssa.num_components, - old_tex->dest.ssa.bit_size); + nir_def_init(&tex->instr, &tex->def, old_tex->def.num_components, + old_tex->def.bit_size); nir_builder_instr_insert(b, &tex->instr); - return &tex->dest.ssa; + return &tex->def; } static unsigned @@ -437,7 +437,7 @@ lower_ycbcr_tex_instr(nir_builder *b, nir_instr *instr, void *_state) swizzled_bpcs); } - nir_def_rewrite_uses(&tex->dest.ssa, result); + nir_def_rewrite_uses(&tex->def, result); nir_instr_remove(&tex->instr); return true;