bi_foreach_instr_in_block_rev(block, I) {
/* If a destination is required by helper invocation... */
bi_foreach_dest(I, d) {
- if (bi_is_null(I->dest[d]))
- continue;
-
if (!BITSET_TEST(deps, bi_get_node(I->dest[d])))
continue;
bool exec = false;
bi_foreach_dest(I, d) {
- if (I->dest[d].type == BI_INDEX_NORMAL)
- exec |= BITSET_TEST(deps, bi_get_node(I->dest[d]));
+ assert(I->dest[d].type == BI_INDEX_NORMAL);
+ exec |= BITSET_TEST(deps, bi_get_node(I->dest[d]));
}
I->skip = !exec;
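
A minimal sketch of the complementary propagation step, assuming the helper-invocation requirement spreads backwards from a required instruction to everything it reads; this is illustrative and not a quote of the pass:

/* Hedged sketch: if this instruction must execute in helper invocations,
 * its sources are needed there too, so add them to the deps bitset and
 * let the backwards walk pick them up. */
if (exec) {
   bi_foreach_src(I, s) {
      if (bi_is_ssa(I->src[s]))
         BITSET_SET(deps, bi_get_node(I->src[s]));
   }
}
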
/* Writes depend on reads and writes */
bi_foreach_dest(I, s) {
bi_index dest = I->dest[s];
- if (dest.type == BI_INDEX_NORMAL) {
-    add_dep(node, last_read[label_index(ctx, dest)]);
-    add_dep(node, last_write[label_index(ctx, dest)]);
-    last_write[label_index(ctx, dest)] = node;
- }
+ assert(dest.type == BI_INDEX_NORMAL);
+ add_dep(node, last_read[label_index(ctx, dest)]);
+ add_dep(node, last_write[label_index(ctx, dest)]);
+ last_write[label_index(ctx, dest)] = node;
}
bi_foreach_src(I, s) {
/* Destinations must be unique */
bi_foreach_dest(I, d) {
unsigned node = bi_get_node(I->dest[d]);
- if (node < max && live[node])
+ assert(node < max);
+ if (live[node])
delta -= bi_count_write_registers(I, d);
}
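
A rough sketch of the matching source side of the pressure delta, assuming the same live[]/max bookkeeping and the existing bi_count_read_registers helper; illustrative only, not part of this patch:

/* Hedged sketch: sources not yet live past this point add to register
 * demand, mirroring how dead destinations subtract from it above. */
bi_foreach_src(I, s) {
   unsigned node = bi_get_node(I->src[s]);

   if (node < max && !live[node])
      delta += bi_count_read_registers(I, s);
}
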
bi_foreach_dest(ins, d) {
unsigned node = bi_get_node(ins->dest[d]);
-
- if (node >= node_count)
- continue;
+ assert(node < node_count);
/* Don't allocate to anything that's read later as a
* preloaded register. The affinity is the intersection
bi_foreach_instr_global(ctx, ins) {
bi_foreach_dest(ins, d) {
unsigned dest = bi_get_node(ins->dest[d]);
- if (dest < node_count)
-    l->affinity[dest] = default_affinity;
+ assert(dest < node_count);
+ l->affinity[dest] = default_affinity;
}
/* Blend shaders expect the src colour to be in r0-r3 */
bi_foreach_instr_global(ctx, ins) {
bi_foreach_dest(ins, d) {
unsigned node = bi_get_node(ins->dest[d]);
-
- if (node >= l->node_count)
- continue;
+ assert(node < l->node_count);
/* Don't allow spilling coverage mask writes because the
* register preload logic assumes it will stay in R60.
assert(src.offset == 0);
bi_foreach_dest(I, i) {
- assert(!bi_is_null(I->dest[i]));
-
src.offset = i;
bi_mov_i32_to(&b, I->dest[i], src);
bi_foreach_instr_global(ctx, I) {
bi_foreach_dest(I, d) {
- if (I->dest[d].type == BI_INDEX_NORMAL)
- I->dest[d].value = find_or_allocate_temp(map, I->dest[d].value, &ctx->ssa_alloc);
+ I->dest[d].value = find_or_allocate_temp(map, I->dest[d].value, &ctx->ssa_alloc);
}
bi_foreach_src(I, s) {
}
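
For reference, a sketch of what find_or_allocate_temp is assumed to do here, keyed on the old SSA value with a util hash_table_u64; the real helper's storage may differ:

/* Hedged sketch: hand out a dense new index per old value, allocating one
 * from *alloc on first sight. Entries are stored +1 so a NULL lookup is
 * distinguishable from index 0. */
static unsigned
find_or_allocate_temp(struct hash_table_u64 *map, unsigned value, unsigned *alloc)
{
   void *entry = _mesa_hash_table_u64_search(map, value);

   if (entry)
      return (uintptr_t)entry - 1;

   unsigned temp = (*alloc)++;
   _mesa_hash_table_u64_insert(map, value, (void *)(uintptr_t)(temp + 1));
   return temp;
}
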
bi_foreach_dest(ins, d) {
- if (ins->dest[d].type != BI_INDEX_REGISTER) continue;
+ assert(ins->dest[d].type == BI_INDEX_REGISTER);
unsigned dest = ins->dest[d].value;
unsigned count = bi_count_write_registers(ins, d);
if (d == 0 && bi_opcode_props[instr->op].sr_write)
continue;
- if (bi_is_null(instr->dest[d]))
- continue;
-
assert(instr->dest[0].type == BI_INDEX_REGISTER);
if (live_after_temp & BITFIELD64_BIT(instr->dest[0].value))
count++;
* instruction can't be scheduled */
if (bi_opcode_props[instr->op].sr_write) {
bi_foreach_dest(instr, d) {
- if (bi_is_null(instr->dest[d]))
- continue;
-
unsigned nr = bi_count_write_registers(instr, d);
assert(instr->dest[d].type == BI_INDEX_REGISTER);
unsigned reg = instr->dest[d].value;
bi_foreach_instr_global(ctx, I) {
bi_foreach_dest(I, d) {
- if (bi_is_null(I->dest[d])) continue;
if (!bi_is_ssa(I->dest[d])) continue;
unsigned v = I->dest[d].value;
uint64_t mask = 0;
bi_foreach_dest(I, d) {
- if (bi_is_null(I->dest[d])) continue;
-
assert(I->dest[d].type == BI_INDEX_REGISTER);
unsigned reg = I->dest[d].value;
bi_writes_reg(const bi_instr *I, unsigned reg)
{
bi_foreach_dest(I, d) {
- if (bi_is_null(I->dest[d]))
- continue;
-
assert(I->dest[d].type == BI_INDEX_REGISTER);
unsigned count = bi_count_write_registers(I, d);