nir_ssa_def *
nir_ssa_for_src(nir_builder *build, nir_src src, int num_components)
{
- if (src.is_ssa && src.ssa->num_components == num_components)
+ if (src.ssa->num_components == num_components)
return src.ssa;
assert((unsigned)num_components <= nir_src_num_components(src));
static inline nir_ssa_def *
nir_mov_alu(nir_builder *build, nir_alu_src src, unsigned num_components)
{
- if (src.src.is_ssa && src.src.ssa->num_components == num_components) {
+ if (src.src.ssa->num_components == num_components) {
bool any_swizzles = false;
for (unsigned i = 0; i < num_components; i++) {
if (src.swizzle[i] != i)
nir_if *if_stmt = nir_cf_node_as_if(node);
nir_src_set_parent_if(&if_stmt->condition, if_stmt);
- if (if_stmt->condition.is_ssa) {
- list_addtail(&if_stmt->condition.use_link,
- &if_stmt->condition.ssa->uses);
- } else {
- list_addtail(&if_stmt->condition.use_link,
- &if_stmt->condition.reg.reg->uses);
- }
+ list_addtail(&if_stmt->condition.use_link,
+ &if_stmt->condition.ssa->uses);
}
/**
BITSET_WORD *int_types, bool *progress)
{
bool src_is_sink = nir_src_is_const(src) || nir_src_is_undef(src);
- if (src.is_ssa && dest->is_ssa) {
- copy_type(src.ssa->index, dest->ssa.index, src_is_sink, float_types, progress);
- copy_type(src.ssa->index, dest->ssa.index, src_is_sink, int_types, progress);
- }
+ copy_type(src.ssa->index, dest->ssa.index, src_is_sink, float_types, progress);
+ copy_type(src.ssa->index, dest->ssa.index, src_is_sink, int_types, progress);
}
/** Gather up ALU types for SSA values
/* nir_foreach_src callback: returns true iff this source does NOT
 * reference the given SSA def (so iteration continues).
 *
 * NIR is pure-SSA now, so every nir_src holds an SSA def pointer and a
 * plain pointer compare suffices; the old `!src->is_ssa ||` register
 * escape hatch is gone.  (Unresolved diff markers removed.)
 */
static bool
src_does_not_use_def(nir_src *src, void *def)
{
   return src->ssa != (nir_ssa_def *)def;
}
static bool
* so we need to also check the following if condition, if any.
*/
nir_if *following_if = nir_block_get_following_if(start->block);
- if (following_if && following_if->condition.is_ssa &&
- following_if->condition.ssa == def)
+ if (following_if && following_if->condition.ssa == def)
return true;
return false;
/* Returns whether this destination's SSA def is marked live in the
 * defs_live bitset (indexed by SSA def index).
 *
 * The pre-SSA-only form also treated any register destination as
 * unconditionally live (`!dest->is_ssa || ...`); with registers gone the
 * bitset test alone is correct.  (Unresolved diff markers removed.)
 */
static bool
is_dest_live(const nir_dest *dest, BITSET_WORD *defs_live)
{
   return BITSET_TEST(defs_live, dest->ssa.index);
}
static bool
mark_src_live(const nir_src *src, BITSET_WORD *defs_live)
{
- if (src->is_ssa && !BITSET_TEST(defs_live, src->ssa->index)) {
+ if (!BITSET_TEST(defs_live, src->ssa->index)) {
BITSET_SET(defs_live, src->ssa->index);
return true;
} else {
bool progress = false;
nir_block *next_blk = nir_cf_node_cf_tree_next(&nif->cf_node);
- if (!next_blk || !nif->condition.is_ssa)
+ if (!next_blk)
return false;
nir_if *next_if = nir_block_get_following_if(next_blk);
- if (!next_if || !next_if->condition.is_ssa)
+ if (!next_if)
return false;
/* Here we merge two consecutive ifs that have the same condition e.g:
* lower register pressure.
*/
-static inline bool
-src_is_ssa(nir_src *src, void *state)
-{
- return src->is_ssa;
-}
-
-static inline bool
-instr_reads_register(nir_instr *instr)
-{
- return !nir_foreach_src(instr, src_is_ssa, NULL);
-}
-
static bool
nir_opt_move_block(nir_block *block, nir_move_options options)
{
* the original order is kept.
*/
unsigned index = 1;
- unsigned last_reg_def_index = 0;
nir_foreach_instr_reverse_safe(instr, block) {
instr->index = index++;
- /* Don't move register defs */
- if (nir_instr_def_is_register(instr)) {
- last_reg_def_index = instr->index;
- continue;
- }
-
/* Check if this instruction can be moved downwards */
if (!nir_can_move_instr(instr, options))
continue;
if (nir_instr_prev(first_user) == instr)
continue;
- /* Don't move register reads past register defs */
- if (first_user->index < last_reg_def_index &&
- instr_reads_register(instr)) {
- continue;
- }
-
/* Insert the instruction before it's first user */
exec_node_remove(&instr->node);
instr->index = first_user->index;
return val;
nir_alu_instr *alu = nir_instr_as_alu(val.def->parent_instr);
- if (alu->op != nir_op_iadd ||
- !alu->src[0].src.is_ssa ||
- !alu->src[1].src.is_ssa)
+ if (alu->op != nir_op_iadd)
return val;
nir_ssa_scalar src[2] = {
nir_src *off_src = &intrin->src[offset_src_idx];
nir_ssa_def *replace_src = NULL;
- if (!off_src->is_ssa || off_src->ssa->bit_size != 32)
+ if (off_src->ssa->bit_size != 32)
return false;
if (!nir_src_is_const(*off_src)) {
/* Trim the num_components stored according to the write mask. */
unsigned write_mask = nir_intrinsic_write_mask(instr);
unsigned last_bit = util_last_bit(write_mask);
- if (last_bit < instr->num_components && instr->src[0].is_ssa) {
+ if (last_bit < instr->num_components) {
nir_ssa_def *def = nir_trim_vector(b, instr->src[0].ssa, last_bit);
nir_instr_rewrite_src(&instr->instr,
&instr->src[0],
return false;
for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
- if (!alu->src[i].src.is_ssa ||
- alu->src[i].src.ssa->parent_instr->type != nir_instr_type_ssa_undef)
+ if (alu->src[i].src.ssa->parent_instr->type != nir_instr_type_ssa_undef)
return false;
}
if (nir_op_is_vec(alu->op)) {
for (int i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
- if (alu->src[i].src.is_ssa &&
- alu->src[i].src.ssa->parent_instr->type ==
+ if (alu->src[i].src.ssa->parent_instr->type ==
nir_instr_type_ssa_undef) {
undef |= BITSET_MASK(nir_ssa_alu_instr_src_components(alu, i)) << i;
}
/* Record the SSA def referenced by this source in the `invariants` set.
 *
 * NIR sources are always SSA now, so only the def pointer is added; the
 * old register branch (`src->reg.reg`) has been dropped.  (Unresolved
 * diff markers removed.)
 */
static void
add_src(nir_src *src, struct set *invariants)
{
   _mesa_set_add(invariants, src->ssa);
}
static bool
/* Returns whether this destination's embedded SSA def was previously
 * added to the `invariants` set (non-NULL search result converts to
 * true).
 *
 * Matches add_src() above: sources store a pointer to the def that
 * `&dest->ssa` is the address of, so the set keys agree.  The register
 * branch (`dest->reg.reg`) is gone along with non-SSA dests.
 * (Unresolved diff markers removed.)
 */
static bool
dest_is_invariant(nir_dest *dest, struct set *invariants)
{
   return _mesa_set_search(invariants, &dest->ssa);
}
static void
if (deref->deref_type == nir_deref_type_array ||
deref->deref_type == nir_deref_type_ptr_as_array) {
- header.deref.packed_src_ssa_16bit =
- deref->arr.index.is_ssa && are_object_ids_16bit(ctx);
+ header.deref.packed_src_ssa_16bit = are_object_ids_16bit(ctx);
header.deref.in_bounds = deref->arr.in_bounds;
}