return false;
}
-/* get allocated dest register for nir_dest
+/* get allocated dest register for nir_def
* *p_swiz tells how the components need to be placed into register
*/
static hw_dst
-ra_dest(struct etna_compile *c, nir_dest *dest, unsigned *p_swiz)
+ra_def(struct etna_compile *c, nir_def *def, unsigned *p_swiz)
{
unsigned swiz = INST_SWIZ_IDENTITY, mask = 0xf;
- dest = real_dest(dest, &swiz, &mask);
+ def = real_def(def, &swiz, &mask);
- unsigned r = ra_get_node_reg(c->g, c->live_map[dest_index(c->impl, dest)]);
+ unsigned r = ra_get_node_reg(c->g, c->live_map[def_index(c->impl, def)]);
unsigned t = reg_get_type(r);
*p_swiz = inst_swiz_compose(swiz, reg_dst_swiz[t]);
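(For orientation, not part of the patch: a minimal model of what inst_swiz_compose() computes, assuming the usual 2-bits-per-lane swizzle packing where INST_SWIZ_IDENTITY is 0xe4. compose_swiz_sketch() is a hypothetical stand-in, not the driver's actual helper.)
/* Hypothetical sketch: apply `first`, then `second`, by routing each
 * output lane selected by `second` through `first`. */
static unsigned
compose_swiz_sketch(unsigned first, unsigned second)
{
   unsigned out = 0;
   for (unsigned lane = 0; lane < 4; lane++) {
      unsigned sel = (second >> (2 * lane)) & 0x3; /* lane picked by `second` */
      out |= ((first >> (2 * sel)) & 0x3) << (2 * lane);
   }
   return out;
}
/* e.g. composing with identity is a no-op:
 * compose_swiz_sketch(s, 0xe4) == s and compose_swiz_sketch(0xe4, s) == s */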
assert(!(alu->op >= nir_op_vec2 && alu->op <= nir_op_vec4));
unsigned dst_swiz;
- hw_dst dst = ra_dest(c, &alu->dest.dest, &dst_swiz);
+ hw_dst dst = ra_def(c, &alu->dest.dest.ssa, &dst_swiz);
switch (alu->op) {
case nir_op_fdot2:
emit_tex(struct etna_compile *c, nir_tex_instr * tex)
{
unsigned dst_swiz;
- hw_dst dst = ra_dest(c, &tex->dest, &dst_swiz);
+ hw_dst dst = ra_def(c, &tex->dest.ssa, &dst_swiz);
nir_src *coord = NULL, *src1 = NULL, *src2 = NULL;
for (unsigned i = 0; i < tex->num_srcs; i++) {
break;
case nir_intrinsic_load_uniform: {
unsigned dst_swiz;
- struct etna_inst_dst dst = ra_dest(c, &intr->dest, &dst_swiz);
+ struct etna_inst_dst dst = ra_def(c, &intr->dest.ssa, &dst_swiz);
/* TODO: rework so extra MOV isn't required, load up to 4 addresses at once */
emit_inst(c, &(struct etna_inst) {
emit_inst(c, &(struct etna_inst) {
.opcode = INST_OPCODE_LOAD,
.type = INST_TYPE_U32,
- .dst = ra_dest(c, &intr->dest, &dst_swiz),
+ .dst = ra_def(c, &intr->dest.ssa, &dst_swiz),
.src[0] = get_src(c, &intr->src[1]),
.src[1] = const_src(c, &CONST_VAL(ETNA_UNIFORM_UBO0_ADDR + idx, 0), 1),
});
return src->ssa->index;
}
-/* get unique ssa/reg index for nir_dest */
+/* get unique ssa/reg index for nir_def */
static inline unsigned
-dest_index(nir_function_impl *impl, nir_dest *dest)
+def_index(nir_function_impl *impl, nir_def *def)
{
- nir_intrinsic_instr *store = nir_store_reg_for_def(&dest->ssa);
+ nir_intrinsic_instr *store = nir_store_reg_for_def(def);
if (store) {
nir_def *reg = store->src[1].ssa;
return reg->index;
}
- return dest->ssa.index;
+ return def->index;
}
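(Aside, not part of the patch: a sketch of the two cases def_index() now distinguishes, with invented value names. Together with src_index() above, which maps values read back through load_reg the same way, everything flowing through a NIR register shares one live-range slot.)
/* a = fmul b, c
 * @store_reg(a, reg)      // src[0] = value, src[1] = the register
 * ...
 * d = @load_reg(reg)
 *
 * def_index(impl, a) follows the store and returns reg->index; a def
 * with no store_reg use just returns its own SSA index. */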
static inline void
-update_swiz_mask(nir_alu_instr *alu, nir_dest *dest, unsigned *swiz, unsigned *mask)
+update_swiz_mask(nir_alu_instr *alu, nir_def *def, unsigned *swiz, unsigned *mask)
{
if (!swiz)
return;
- bool is_vec = dest != NULL;
+ bool is_vec = def != NULL;
unsigned swizzle = 0, write_mask = 0;
- for (unsigned i = 0; i < nir_dest_num_components(alu->dest.dest); i++) {
+ for (unsigned i = 0; i < alu->dest.dest.ssa.num_components; i++) {
/* src is different (only check for vecN) */
- if (is_vec && alu->src[i].src.ssa != &dest->ssa)
+ if (is_vec && alu->src[i].src.ssa != def)
continue;
unsigned src_swiz = is_vec ? alu->src[i].swizzle[0] : alu->src[0].swizzle[i];
*mask = write_mask;
}
-static nir_dest *
-real_dest(nir_dest *dest, unsigned *swiz, unsigned *mask)
+static nir_def *
+real_def(nir_def *def, unsigned *swiz, unsigned *mask)
{
- if (!dest)
- return dest;
+ if (!def)
+ return def;
- bool can_bypass_src = !nir_def_used_by_if(&dest->ssa);
- nir_instr *p_instr = dest->ssa.parent_instr;
+ bool can_bypass_src = !nir_def_used_by_if(def);
+ nir_instr *p_instr = def->parent_instr;
/* if used by a vecN, the "real" destination becomes the vecN destination
* lower_alu guarantees that values used by a vecN are only used by that vecN
 * we can apply the same logic to movs in some cases too
*/
- nir_foreach_use(use_src, &dest->ssa) {
+ nir_foreach_use(use_src, def) {
nir_instr *instr = use_src->parent_instr;
/* src bypass check: for now only deal with tex src mov case
case nir_op_vec2:
case nir_op_vec3:
case nir_op_vec4:
- assert(!nir_def_used_by_if(&dest->ssa));
- nir_foreach_use(use_src, &dest->ssa)
+ assert(!nir_def_used_by_if(def));
+ nir_foreach_use(use_src, def)
assert(use_src->parent_instr == instr);
- update_swiz_mask(alu, dest, swiz, mask);
+ update_swiz_mask(alu, def, swiz, mask);
break;
case nir_op_mov: {
- switch (dest->ssa.parent_instr->type) {
+ switch (def->parent_instr->type) {
case nir_instr_type_alu:
case nir_instr_type_tex:
break;
default:
continue;
}
- if (nir_def_used_by_if(&dest->ssa) ||
- list_length(&dest->ssa.uses) > 1)
+ if (nir_def_used_by_if(def) || list_length(&def->uses) > 1)
continue;
update_swiz_mask(alu, NULL, swiz, mask);
assert(!(instr->pass_flags & BYPASS_SRC));
instr->pass_flags |= BYPASS_DST;
- return real_dest(&alu->dest.dest, swiz, mask);
+ return real_def(&alu->dest.dest.ssa, swiz, mask);
}
if (can_bypass_src && !(p_instr->pass_flags & BYPASS_DST)) {
return NULL;
}
- return dest;
+ return def;
}
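(Aside, not part of the patch: a worked example of the vecN collapse, with invented SSA names.)
/* ssa_1 = fadd ssa_a, ssa_b
 * ssa_2 = vec4 ssa_1.x, ssa_1.x, ssa_c.y, ssa_c.z
 *
 * real_def(ssa_1, &swiz, &mask) sees the single vec4 use and returns
 * ssa_2 instead; update_swiz_mask() leaves *mask = 0x3 (the two lanes
 * fed by ssa_1) and a swizzle broadcasting the fadd's .x into them, so
 * the fadd writes directly into the vec4's register. */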
-/* if instruction dest needs a register, return nir_dest for it */
-static inline nir_dest *
-dest_for_instr(nir_instr *instr)
+/* if instruction dest needs a register, return nir_def for it */
+static inline nir_def *
+def_for_instr(nir_instr *instr)
{
- nir_dest *dest = NULL;
+ nir_def *def = NULL;
switch (instr->type) {
case nir_instr_type_alu:
- dest = &nir_instr_as_alu(instr)->dest.dest;
+ def = &nir_instr_as_alu(instr)->dest.dest.ssa;
break;
case nir_instr_type_tex:
- dest = &nir_instr_as_tex(instr)->dest;
+ def = &nir_instr_as_tex(instr)->dest.ssa;
break;
case nir_instr_type_intrinsic: {
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
intr->intrinsic == nir_intrinsic_load_instance_id ||
intr->intrinsic == nir_intrinsic_load_texture_scale ||
intr->intrinsic == nir_intrinsic_load_texture_size_etna)
- dest = &intr->dest;
+ def = &intr->dest.ssa;
} break;
case nir_instr_type_deref:
return NULL;
default:
break;
}
- return real_dest(dest, NULL, NULL);
+ return real_def(def, NULL, NULL);
}
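(A usage sketch, mirroring the liveness walk further down: def_for_instr() is the single "does this need a register?" predicate, and anything real_def() bypassed, for instance a mov that only feeds a tex coordinate, comes back NULL and never gets a live range.)
nir_foreach_block(block, impl) {
   nir_foreach_instr(instr, block) {
      nir_def *def = def_for_instr(instr);
      if (!def)
         continue; /* bypassed or folded into a vecN: no register */
      /* ...record a live range for def_index(impl, def)... */
   }
}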
struct live_def {
nir_instr *instr;
- nir_dest *dest; /* cached dest_for_instr */
+ nir_def *def; /* cached def_for_instr */
unsigned live_start, live_end; /* live range */
};
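(Not part of the patch: with this struct, interference is plain interval overlap over the linear def numbering. A hypothetical helper to make the invariant explicit, assuming half-open [live_start, live_end) ranges:)
static bool
live_defs_interfere(const struct live_def *a, const struct live_def *b)
{
   /* distinct registers are required iff the ranges overlap */
   return a->live_start < b->live_end && b->live_start < a->live_end;
}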
nir_foreach_block(block, impl) {
block_live_index[block->index] = state.num_defs;
nir_foreach_instr(instr, block) {
- nir_dest *dest = dest_for_instr(instr);
- if (!dest)
+ nir_def *def = def_for_instr(instr);
+ if (!def)
continue;
- unsigned idx = dest_index(impl, dest);
+ unsigned idx = def_index(impl, def);
/* register is already in defs */
if (live_map[idx] != ~0u)
continue;
- defs[state.num_defs] = (struct live_def) {instr, dest, state.num_defs, 0};
+ defs[state.num_defs] = (struct live_def) {instr, def, state.num_defs, 0};
/* input live from the start */
if (instr->type == nir_instr_type_intrinsic) {
/* set classes from num_components */
for (unsigned i = 0; i < num_nodes; i++) {
nir_instr *instr = defs[i].instr;
- nir_dest *dest = defs[i].dest;
- unsigned comp = nir_dest_num_components(*dest) - 1;
+ nir_def *def = defs[i].def;
+ unsigned comp = def->num_components - 1;
if (instr->type == nir_instr_type_alu &&
c->specs->has_new_transcendentals) {
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
/* can't have dst swizzle or sparse writemask on UBO loads */
if (intr->intrinsic == nir_intrinsic_load_ubo) {
- assert(dest == &intr->dest);
- if (dest->ssa.num_components == 2)
+ assert(def == &intr->dest.ssa);
+ if (def->num_components == 2)
comp = REG_CLASS_VIRT_VEC2C;
- if (dest->ssa.num_components == 3)
+ if (def->num_components == 3)
comp = REG_CLASS_VIRT_VEC3C;
}
}
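(Condensed view, not part of the patch and ignoring the transcendental special case above: class choice is just component count, except that LOAD results can use neither a dst swizzle nor a sparse writemask, so 2- and 3-component UBO loads get the contiguous classes. pick_reg_class_sketch() is a hypothetical helper.)
static unsigned
pick_reg_class_sketch(unsigned num_components, bool is_ubo_load)
{
   if (is_ubo_load && num_components == 2)
      return REG_CLASS_VIRT_VEC2C; /* x,y forced into consecutive lanes */
   if (is_ubo_load && num_components == 3)
      return REG_CLASS_VIRT_VEC3C; /* x,y,z forced into consecutive lanes */
   return num_components - 1;      /* first four classes: scalar..vec4 */
}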
if (instr->type != nir_instr_type_intrinsic)
continue;
- nir_dest *dest = dest_for_instr(instr);
+ nir_def *def = def_for_instr(instr);
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
unsigned reg;
REG_TYPE_VIRT_VEC2_XY,
REG_TYPE_VIRT_VEC3_XYZ,
REG_TYPE_VEC4,
- }[nir_dest_num_components(*dest) - 1];
+ }[def->num_components - 1];
break;
case nir_intrinsic_load_instance_id:
reg = c->variant->infile.num_reg * NUM_REG_TYPES + REG_TYPE_VIRT_SCALAR_Y;
continue;
}
- ra_set_node_reg(g, live_map[dest_index(impl, dest)], reg);
+ ra_set_node_reg(g, live_map[def_index(impl, def)], reg);
}
}
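(Closing aside, not part of the patch: ra_set_node_reg() precolors a node so the allocator must honor that assignment. The arithmetic above reads as "the scalar .y view of the first temp past the shader inputs", since each hardware temp exposes NUM_REG_TYPES virtual views in the ra graph:

   reg = c->variant->infile.num_reg * NUM_REG_TYPES + REG_TYPE_VIRT_SCALAR_Y;

so only the remaining, unpinned nodes are left for the allocator to choose.)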