From 6189274f5751ed3595e42f044f4f8ce386f5a272 Mon Sep 17 00:00:00 2001
From: Alyssa Rosenzweig
Date: Wed, 21 Aug 2019 09:15:56 -0700
Subject: [PATCH] pan/midgard: Represent unused nodes by ~0

This allows nodes to be unsigned and prevents a class of weird
signedness bugs identified by Coverity.

Signed-off-by: Alyssa Rosenzweig
---
 src/panfrost/midgard/compiler.h                |  8 ++---
 src/panfrost/midgard/helpers.h                 |  7 ++--
 src/panfrost/midgard/midgard_compile.c         | 44 +++++++++++++-------------
 src/panfrost/midgard/midgard_derivatives.c     |  2 +-
 src/panfrost/midgard/midgard_opt_dce.c         |  2 +-
 src/panfrost/midgard/midgard_opt_invert.c      |  2 +-
 src/panfrost/midgard/midgard_opt_perspective.c |  2 +-
 src/panfrost/midgard/midgard_print.c           |  2 +-
 src/panfrost/midgard/midgard_ra.c              | 27 ++++++++--------
 src/panfrost/midgard/midgard_schedule.c        | 16 +++++-----
 src/panfrost/midgard/mir.c                     |  2 +-
 11 files changed, 56 insertions(+), 58 deletions(-)

diff --git a/src/panfrost/midgard/compiler.h b/src/panfrost/midgard/compiler.h
index edf0c10..75f9e4a 100644
--- a/src/panfrost/midgard/compiler.h
+++ b/src/panfrost/midgard/compiler.h
@@ -68,11 +68,11 @@ typedef struct midgard_branch {
 } midgard_branch;
 
 /* Instruction arguments represented as block-local SSA indices, rather than
- * registers. Negative values mean unused. */
+ * registers. ~0 means unused. */
 
 typedef struct {
-        int src[3];
-        int dest;
+        unsigned src[3];
+        unsigned dest;
 
         bool inline_constant;
 } ssa_args;
@@ -534,7 +534,7 @@ v_mov(unsigned src, midgard_vector_alu_src mod, unsigned dest)
                 .type = TAG_ALU_4,
                 .mask = 0xF,
                 .ssa_args = {
-                        .src = { SSA_UNUSED_1, src, -1 },
+                        .src = { SSA_UNUSED, src, SSA_UNUSED },
                         .dest = dest,
                 },
                 .alu = {
diff --git a/src/panfrost/midgard/helpers.h b/src/panfrost/midgard/helpers.h
index ffcf9d8..03fcc01 100644
--- a/src/panfrost/midgard/helpers.h
+++ b/src/panfrost/midgard/helpers.h
@@ -175,12 +175,9 @@ quadword_size(int tag)
 #define REGISTER_TEXTURE_BASE 28
 #define REGISTER_SELECT 31
 
-/* SSA helper aliases to mimic the registers. UNUSED_0 encoded as an inline
- * constant. UNUSED_1 encoded as REGISTER_UNUSED */
-
-#define SSA_UNUSED_0 0
-#define SSA_UNUSED_1 -2
+/* SSA helper aliases to mimic the registers. */
+#define SSA_UNUSED ~0
 
 #define SSA_FIXED_SHIFT 24
 #define SSA_FIXED_REGISTER(reg) (((1 + (reg)) << SSA_FIXED_SHIFT) | 1)
 #define SSA_REG_FROM_FIXED(reg) ((((reg) & ~1) >> SSA_FIXED_SHIFT) - 1)
diff --git a/src/panfrost/midgard/midgard_compile.c b/src/panfrost/midgard/midgard_compile.c
index c004c95..557e683 100644
--- a/src/panfrost/midgard/midgard_compile.c
+++ b/src/panfrost/midgard/midgard_compile.c
@@ -105,8 +105,8 @@ midgard_block_add_successor(midgard_block *block, midgard_block *successor)
                 .type = TAG_LOAD_STORE_4, \
                 .mask = 0xF, \
                 .ssa_args = { \
-                        .dest = -1, \
-                        .src = { -1, -1, -1 }, \
+                        .dest = ~0, \
+                        .src = { ~0, ~0, ~0 }, \
                 }, \
                 .load_store = { \
                         .op = midgard_op_##name, \
@@ -213,8 +213,8 @@ v_alu_br_compact_cond(midgard_jmp_writeout_op op, unsigned tag, signed offset, u
                 .compact_branch = true,
                 .br_compact = compact,
                 .ssa_args = {
-                        .dest = -1,
-                        .src = { -1, -1, -1 },
+                        .dest = ~0,
+                        .src = { ~0, ~0, ~0 },
                 }
         };
 
@@ -236,8 +236,8 @@ v_branch(bool conditional, bool invert)
                         .invert_conditional = invert
                 },
                 .ssa_args = {
-                        .dest = -1,
-                        .src = { -1, -1, -1 },
+                        .dest = ~0,
+                        .src = { ~0, ~0, ~0 },
                 }
         };
 
@@ -338,7 +338,7 @@ midgard_nir_sysval_for_intrinsic(nir_intrinsic_instr *instr)
         case nir_intrinsic_store_ssbo:
                 return midgard_sysval_for_ssbo(instr);
         default:
-                return -1;
+                return ~0;
         }
 }
 
@@ -622,7 +622,7 @@ emit_condition(compiler_context *ctx, nir_src *src, bool for_branch, unsigned co
                 .mask = 1 << COMPONENT_W,
 
                 .ssa_args = {
-                        .src = { condition, condition, -1 },
+                        .src = { condition, condition, ~0 },
                         .dest = SSA_FIXED_REGISTER(31),
                 },
 
@@ -661,7 +661,7 @@ emit_condition_mixed(compiler_context *ctx, nir_alu_src *src, unsigned nr_comp)
                 .precede_break = true,
                 .mask = mask_of(nr_comp),
                 .ssa_args = {
-                        .src = { condition, condition, -1 },
+                        .src = { condition, condition, ~0 },
                         .dest = SSA_FIXED_REGISTER(31),
                 },
                 .alu = {
@@ -1021,7 +1021,7 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
          * needs it, or else we may segfault. */
 
         unsigned src0 = nir_alu_src_index(ctx, &instr->src[0]);
-        unsigned src1 = nr_inputs == 2 ? nir_alu_src_index(ctx, &instr->src[1]) : SSA_UNUSED_0;
+        unsigned src1 = nr_inputs == 2 ? nir_alu_src_index(ctx, &instr->src[1]) : ~0;
 
         /* Rather than use the instruction generation helpers, we do it
          * ourselves here to avoid the mess */
@@ -1030,9 +1030,9 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
                 .type = TAG_ALU_4,
                 .ssa_args = {
                         .src = {
-                                quirk_flipped_r24 ? SSA_UNUSED_1 : src0,
-                                quirk_flipped_r24 ? src0 : src1,
-                                -1
+                                quirk_flipped_r24 ? ~0 : src0,
+                                quirk_flipped_r24 ? src0 : src1,
+                                ~0
                         },
                         .dest = dest,
                 }
@@ -1370,13 +1370,13 @@ emit_fragment_store(compiler_context *ctx, unsigned src, unsigned rt)
 
         midgard_instruction rt_move = {
                 .ssa_args = {
-                        .dest = -1
+                        .dest = ~0
                 }
         };
 
         if (rt != 0) {
                 /* We'll write to r1.z */
-                rt_move = v_mov(-1, blank_alu_src, SSA_FIXED_REGISTER(1));
+                rt_move = v_mov(~0, blank_alu_src, SSA_FIXED_REGISTER(1));
                 rt_move.mask = 1 << COMPONENT_Z;
                 rt_move.unit = UNIT_SADD;
 
@@ -1627,7 +1627,7 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
         case nir_intrinsic_load_viewport_scale:
         case nir_intrinsic_load_viewport_offset:
         case nir_intrinsic_load_num_work_groups:
-                emit_sysval_read(ctx, &instr->instr, -1, 3);
+                emit_sysval_read(ctx, &instr->instr, ~0, 3);
                 break;
 
         case nir_intrinsic_load_work_group_id:
@@ -1733,7 +1733,7 @@ emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
                 .mask = 0xF,
                 .ssa_args = {
                         .dest = nir_dest_index(ctx, &instr->dest),
-                        .src = { -1, -1, -1 },
+                        .src = { ~0, ~0, ~0 },
                 },
                 .texture = {
                         .op = midgard_texop,
@@ -1867,7 +1867,7 @@ emit_tex(compiler_context *ctx, nir_tex_instr *instr)
                 emit_texop_native(ctx, instr, TEXTURE_OP_TEXEL_FETCH);
                 break;
         case nir_texop_txs:
-                emit_sysval_read(ctx, &instr->instr, -1, 4);
+                emit_sysval_read(ctx, &instr->instr, ~0, 4);
                 break;
         default:
                 unreachable("Unhanlded texture op");
@@ -2158,7 +2158,7 @@ embedded_to_inline_constant(compiler_context *ctx)
 
                         /* Get rid of the embedded constant */
                         ins->has_constants = false;
-                        ins->ssa_args.src[1] = -1;
+                        ins->ssa_args.src[1] = ~0;
                         ins->ssa_args.inline_constant = true;
                         ins->inline_constant = scaled_constant;
                 }
@@ -2260,7 +2260,7 @@ static void
 emit_fragment_epilogue(compiler_context *ctx)
 {
         /* Just emit the last chunk with the branch */
-        EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, -1, midgard_condition_always);
+        EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, ~0, midgard_condition_always);
 }
 
 static midgard_block *
@@ -2291,8 +2291,8 @@ emit_block(compiler_context *ctx, nir_block *block)
         this_block->is_scheduled = false;
         ++ctx->block_count;
 
-        ctx->texture_index[0] = -1;
-        ctx->texture_index[1] = -1;
+        ctx->texture_index[0] = ~0;
+        ctx->texture_index[1] = ~0;
 
         /* Set up current block */
         list_inithead(&this_block->instructions);
diff --git a/src/panfrost/midgard/midgard_derivatives.c b/src/panfrost/midgard/midgard_derivatives.c
index 0f15af3..9a15063 100644
--- a/src/panfrost/midgard/midgard_derivatives.c
+++ b/src/panfrost/midgard/midgard_derivatives.c
@@ -96,7 +96,7 @@ midgard_emit_derivatives(compiler_context *ctx, nir_alu_instr *instr)
                 .mask = mask_of(nr_components),
                 .ssa_args = {
                         .dest = nir_dest_index(ctx, &instr->dest.dest),
-                        .src = { nir_alu_src_index(ctx, &instr->src[0]), -1, -1 },
+                        .src = { nir_alu_src_index(ctx, &instr->src[0]), ~0, ~0 },
                 },
                 .texture = {
                         .op = mir_derivative_op(instr->op),
diff --git a/src/panfrost/midgard/midgard_opt_dce.c b/src/panfrost/midgard/midgard_opt_dce.c
index f201183..57768ed 100644
--- a/src/panfrost/midgard/midgard_opt_dce.c
+++ b/src/panfrost/midgard/midgard_opt_dce.c
@@ -103,7 +103,7 @@ midgard_opt_post_move_eliminate(compiler_context *ctx, midgard_block *block, str
                 unsigned iA = ins->ssa_args.dest;
                 unsigned iB = ins->ssa_args.src[1];
 
-                if ((iA < 0) || (iB < 0)) continue;
+                if ((iA == ~0) || (iB == ~0)) continue;
 
                 unsigned A = iA >= SSA_FIXED_MINIMUM ?
                         SSA_REG_FROM_FIXED(iA) :
diff --git a/src/panfrost/midgard/midgard_opt_invert.c b/src/panfrost/midgard/midgard_opt_invert.c
index b68c98c..592a5d3 100644
--- a/src/panfrost/midgard/midgard_opt_invert.c
+++ b/src/panfrost/midgard/midgard_opt_invert.c
@@ -41,7 +41,7 @@ midgard_lower_invert(compiler_context *ctx, midgard_block *block)
                         .type = TAG_ALU_4,
                         .mask = ins->mask,
                         .ssa_args = {
-                                .src = { temp, -1, -1 },
+                                .src = { temp, ~0, ~0 },
                                 .dest = ins->ssa_args.dest,
                                 .inline_constant = true
                         },
diff --git a/src/panfrost/midgard/midgard_opt_perspective.c b/src/panfrost/midgard/midgard_opt_perspective.c
index f5e483e..22b7736 100644
--- a/src/panfrost/midgard/midgard_opt_perspective.c
+++ b/src/panfrost/midgard/midgard_opt_perspective.c
@@ -116,7 +116,7 @@ midgard_opt_combine_projection(compiler_context *ctx, midgard_block *block)
                         .mask = ins->mask,
                         .ssa_args = {
                                 .dest = to,
-                                .src = { frcp_from, -1, -1 },
+                                .src = { frcp_from, ~0, ~0 },
                         },
                         .load_store = {
                                 .op = frcp_component == COMPONENT_W ?
diff --git a/src/panfrost/midgard/midgard_print.c b/src/panfrost/midgard/midgard_print.c
index 1f8bd12..add4051 100644
--- a/src/panfrost/midgard/midgard_print.c
+++ b/src/panfrost/midgard/midgard_print.c
@@ -34,7 +34,7 @@ static void
 mir_print_index(int source)
 {
-        if (source < 0) {
+        if (source == ~0) {
                 printf("_");
                 return;
         }
 
diff --git a/src/panfrost/midgard/midgard_ra.c b/src/panfrost/midgard/midgard_ra.c
index bdfb56b..f900e84 100644
--- a/src/panfrost/midgard/midgard_ra.c
+++ b/src/panfrost/midgard/midgard_ra.c
@@ -138,12 +138,14 @@ default_phys_reg(int reg)
  * register corresponds to */
 
 static struct phys_reg
-index_to_reg(compiler_context *ctx, struct ra_graph *g, int reg)
+index_to_reg(compiler_context *ctx, struct ra_graph *g, unsigned reg)
 {
         /* Check for special cases */
-        if (reg >= SSA_FIXED_MINIMUM)
+        if ((reg == ~0) && g)
+                return default_phys_reg(REGISTER_UNUSED);
+        else if (reg >= SSA_FIXED_MINIMUM)
                 return default_phys_reg(SSA_REG_FROM_FIXED(reg));
-        else if ((reg < 0) || !g)
+        else if (!g)
                 return default_phys_reg(REGISTER_UNUSED);
 
         /* Special cases aside, we pick the underlying register */
@@ -301,7 +303,7 @@ static void
 set_class(unsigned *classes, unsigned node, unsigned class)
 {
         /* Check that we're even a node */
-        if ((node < 0) || (node >= SSA_FIXED_MINIMUM))
+        if (node >= SSA_FIXED_MINIMUM)
                 return;
 
         /* First 4 are work, next 4 are load/store.. */
@@ -321,7 +323,7 @@ set_class(unsigned *classes, unsigned node, unsigned class)
 static void
 force_vec4(unsigned *classes, unsigned node)
 {
-        if ((node < 0) || (node >= SSA_FIXED_MINIMUM))
+        if (node >= SSA_FIXED_MINIMUM)
                 return;
 
         /* Force vec4 = 3 */
@@ -335,7 +337,7 @@ static bool
 check_read_class(unsigned *classes, unsigned tag, unsigned node)
 {
         /* Non-nodes are implicitly ok */
-        if ((node < 0) || (node >= SSA_FIXED_MINIMUM))
+        if (node >= SSA_FIXED_MINIMUM)
                 return true;
 
         unsigned current_class = classes[node] >> 2;
@@ -358,7 +360,7 @@ static bool
 check_write_class(unsigned *classes, unsigned tag, unsigned node)
 {
         /* Non-nodes are implicitly ok */
-        if ((node < 0) || (node >= SSA_FIXED_MINIMUM))
+        if (node >= SSA_FIXED_MINIMUM)
                 return true;
 
         unsigned current_class = classes[node] >> 2;
@@ -383,7 +385,7 @@ check_write_class(unsigned *classes, unsigned tag, unsigned node)
 static void
 mark_node_class (unsigned *bitfield, unsigned node)
 {
-        if ((node >= 0) && (node < SSA_FIXED_MINIMUM))
+        if (node < SSA_FIXED_MINIMUM)
                 BITSET_SET(bitfield, node);
 }
 
@@ -522,7 +524,7 @@ mir_lower_special_reads(compiler_context *ctx)
 static void
 liveness_gen(uint8_t *live, unsigned node, unsigned max, unsigned mask)
 {
-        if ((node < 0) || (node >= max))
+        if (node >= max)
                 return;
 
         live[node] |= mask;
@@ -531,7 +533,7 @@ liveness_gen(uint8_t *live, unsigned node, unsigned max, unsigned mask)
 static void
 liveness_kill(uint8_t *live, unsigned node, unsigned max, unsigned mask)
 {
-        if ((node < 0) || (node >= max))
+        if (node >= max)
                 return;
 
         live[node] &= ~mask;
@@ -659,7 +661,7 @@ mir_compute_liveness(
 
                         unsigned dest = ins->ssa_args.dest;
 
-                        if (dest >= 0 && dest < ctx->temp_count) {
+                        if (dest < ctx->temp_count) {
                                 for (unsigned i = 0; i < ctx->temp_count; ++i)
                                         if (live[i])
                                                 ra_add_node_interference(g, dest, i);
@@ -710,7 +712,6 @@ allocate_registers(compiler_context *ctx, bool *spilled)
         unsigned *found_class = calloc(sizeof(unsigned), ctx->temp_count);
 
         mir_foreach_instr_global(ctx, ins) {
-                if (ins->ssa_args.dest < 0) continue;
                 if (ins->ssa_args.dest >= SSA_FIXED_MINIMUM) continue;
 
                 /* 0 for x, 1 for xy, 2 for xyz, 3 for xyzw */
@@ -931,7 +932,7 @@ install_registers_instr(
                         compose_writemask(ins->mask, dest);
 
                 /* If there is a register LOD/bias, use it */
-                if (args.src[1] > -1) {
+                if (args.src[1] != ~0) {
                         midgard_tex_register_select sel = {
                                 .select = lod.reg,
                                 .full = 1,
diff --git a/src/panfrost/midgard/midgard_schedule.c b/src/panfrost/midgard/midgard_schedule.c
index 6650281..399c259 100644
--- a/src/panfrost/midgard/midgard_schedule.c
+++ b/src/panfrost/midgard/midgard_schedule.c
@@ -159,15 +159,15 @@ can_writeout_fragment(compiler_context *ctx, midgard_instruction **bundle, unsig
                 unsigned src1 = ins->ssa_args.src[1];
 
                 if (!mir_is_written_before(ctx, bundle[0], src0))
-                        src0 = -1;
+                        src0 = ~0;
 
                 if (!mir_is_written_before(ctx, bundle[0], src1))
-                        src1 = -1;
+                        src1 = ~0;
 
-                if ((src0 > 0) && (src0 < node_count))
+                if (src0 < node_count)
                         BITSET_SET(dependencies, src0);
 
-                if ((src1 > 0) && (src1 < node_count))
+                if (src1 < node_count)
                         BITSET_SET(dependencies, src1);
 
                 /* Requirement 2 */
@@ -630,7 +630,7 @@ midgard_pair_load_store(compiler_context *ctx, midgard_block *block)
                         bool deps = false;
 
                         for (unsigned s = 0; s < ARRAY_SIZE(ins->ssa_args.src); ++s)
-                                deps |= (c->ssa_args.src[s] != -1);
+                                deps |= (c->ssa_args.src[s] != ~0);
 
                         if (deps)
                                 continue;
@@ -652,7 +652,7 @@ midgard_pair_load_store(compiler_context *ctx, midgard_block *block)
 static unsigned
 find_or_allocate_temp(compiler_context *ctx, unsigned hash)
 {
-        if ((hash < 0) || (hash >= SSA_FIXED_MINIMUM))
+        if (hash >= SSA_FIXED_MINIMUM)
                 return hash;
 
         unsigned temp = (uintptr_t) _mesa_hash_table_u64_search(
@@ -703,8 +703,8 @@ v_load_store_scratch(
                 .type = TAG_LOAD_STORE_4,
                 .mask = mask,
                 .ssa_args = {
-                        .dest = -1,
-                        .src = { -1, -1, -1 },
+                        .dest = ~0,
+                        .src = { ~0, ~0, ~0 },
                 },
                 .load_store = {
                         .op = is_store ? midgard_op_st_int4 : midgard_op_ld_int4,
diff --git a/src/panfrost/midgard/mir.c b/src/panfrost/midgard/mir.c
index 9e26962..42b84b0 100644
--- a/src/panfrost/midgard/mir.c
+++ b/src/panfrost/midgard/mir.c
@@ -327,7 +327,7 @@ mir_special_index(compiler_context *ctx, unsigned idx)
 bool
 mir_is_written_before(compiler_context *ctx, midgard_instruction *ins, unsigned node)
 {
-        if ((node < 0) || (node >= SSA_FIXED_MINIMUM))
+        if (node >= SSA_FIXED_MINIMUM)
                 return true;
 
         mir_foreach_instr_global(ctx, q) {
-- 
2.7.4
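
Illustration (not part of the patch): the series swaps signed "-1 means unused" indices for unsigned indices with ~0 as the sentinel, so validity checks reduce to a single unsigned comparison instead of a signed less-than-zero test. Below is a minimal standalone C sketch of that pattern; the NODE_UNUSED constant and node_is_real() helper are hypothetical stand-ins, not Midgard code, which uses SSA_UNUSED and open-coded comparisons instead.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical sentinel mirroring the patch's SSA_UNUSED: all bits set.
 * ~0 is the int -1, which converts to UINT_MAX when stored in or
 * compared against an unsigned, so "x == ~0" works for unsigned x. */
#define NODE_UNUSED (~0U)

static bool
node_is_real(unsigned node, unsigned node_count)
{
        /* With unsigned indices, the old signed check
         * (node >= 0 && node < node_count) collapses to one compare,
         * since the sentinel is larger than any valid count. */
        return node < node_count;
}

int
main(void)
{
        unsigned dest = NODE_UNUSED;       /* starts out unused */
        assert(dest == ~0u);               /* sentinel compares equal to ~0 */
        assert(!node_is_real(dest, 128));  /* never mistaken for a real node */

        dest = 5;                          /* now names node #5 */
        assert(node_is_real(dest, 128));
        printf("dest = %u\n", dest);
        return 0;
}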