Codebase audit, in preparation for dynamically allocating instruction sources.
Signed-off-by: Alyssa Rosenzweig <alyssa@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/17794>
* adding a new pattern here, check why you need it and whether we can avoid
* generating the constant BIR at all. */
+/* Return the (swizzle-applied) constant value of source s of I, or 0 when
+ * the instruction has fewer than s + 1 sources. Folding code below reads a
+ * fixed set of four sources; once nr_srcs is dynamic, out-of-range reads
+ * must yield a harmless default instead of touching unallocated storage. */
+static inline uint32_t
+bi_source_value(const bi_instr *I, unsigned s)
+{
+ if (s < I->nr_srcs)
+ return bi_apply_swizzle(I->src[s].value, I->src[s].swizzle);
+ else
+ return 0;
+}
+
uint32_t
bi_fold_constant(bi_instr *I, bool *unsupported)
{
}
/* Grab the sources */
- uint32_t a = bi_apply_swizzle(I->src[0].value, I->src[0].swizzle);
- uint32_t b = bi_apply_swizzle(I->src[1].value, I->src[1].swizzle);
- uint32_t c = bi_apply_swizzle(I->src[2].value, I->src[2].swizzle);
- uint32_t d = bi_apply_swizzle(I->src[3].value, I->src[3].swizzle);
+ uint32_t a = bi_source_value(I, 0);
+ uint32_t b = bi_source_value(I, 1);
+ uint32_t c = bi_source_value(I, 2);
+ uint32_t d = bi_source_value(I, 3);
/* Evaluate the instruction */
switch (I->op) {
* moves are free.
*/
b.cursor = bi_before_block(block);
- for (unsigned i = 0; i < collect->nr_srcs; ++i) {
+ bi_foreach_src(collect, i) {
unsigned reg = (nr_preload * 4) + i;
collect->src[i] = bi_mov_i32(&b, bi_register(reg));
bi_instr *vec = bi_collect_i32_to(&b, ins->dest[0]);
vec->nr_srcs = bi_opcode_props[ins->op].sr_count;
- for (unsigned w = 0; w < vec->nr_srcs; ++w) {
+ bi_foreach_src(vec, w) {
/* FAU is grouped in pairs (2 x 4-byte) */
unsigned base =
pan_lookup_pushed_ubo(ctx->info.push, ubo,
static inline enum bifrost_packed_src
bi_get_src_new(bi_instr *ins, bi_registers *regs, unsigned s)
{
- if (!ins)
+ if (!ins || s >= ins->nr_srcs)
return 0;
bi_index src = ins->src[s];
assert(dest.offset == 0);
assert((bi_is_ssa(dest) || I->nr_srcs == 1) && "nir_lower_phis_to_scalar");
- for (unsigned i = 0; i < I->nr_srcs; ++i) {
+ bi_foreach_src(I, i) {
if (bi_is_null(I->src[i]))
continue;
+/* Whether I's first source is a staging register tied to its destination.
+ * NOTE(review): the null-source check moves after the opcode test so it is
+ * only evaluated for the handful of tied opcodes — same result, and src[0]
+ * is no longer read unconditionally (safe once nr_srcs is dynamic). */
static bool
bi_is_tied(const bi_instr *I)
{
- if (bi_is_null(I->src[0]))
- return false;
-
 return (I->op == BI_OPCODE_TEXC ||
 I->op == BI_OPCODE_TEXC_DUAL ||
 I->op == BI_OPCODE_ATOM_RETURN_I32 ||
 I->op == BI_OPCODE_AXCHG_I32 ||
- I->op == BI_OPCODE_ACMPXCHG_I32);
+ I->op == BI_OPCODE_ACMPXCHG_I32) &&
+ !bi_is_null(I->src[0]);
}
/*
static bool
bi_impacted_t_modifiers(bi_instr *I, unsigned src)
{
+ assert(src < I->nr_srcs);
enum bi_swizzle swizzle = I->src[src].swizzle;
switch (I->op) {
static bool
bi_tuple_is_new_src(bi_instr *instr, struct bi_reg_state *reg, unsigned src_idx)
{
+ assert(src_idx < instr->nr_srcs);
bi_index src = instr->src[src_idx];
/* Only consider sources which come from the register file */
static bool
bi_check_fau_src(bi_instr *ins, unsigned s, uint32_t *constants, unsigned *cwords, bi_index *fau)
{
+ assert(s < ins->nr_srcs);
bi_index src = ins->src[s];
/* Staging registers can't have FAU accesses */
* Obscurely, ATOM_CX is sr_write but can ignore the staging register in
* certain circumstances; this does not require consideration.
*/
- if (bi_opcode_props[I->op].sr_write && I->nr_dests &&
+ if (bi_opcode_props[I->op].sr_write && I->nr_dests && I->nr_srcs &&
bi_is_null(I->dest[0]) && !bi_is_null(I->src[0])) {
unsigned reg = I->src[0].value;
+/* Whether source s of I can be folded into a Valhall ADD_IMM form: the
+ * source must be read unmodified (identity H01 swizzle, no abs/neg) and the
+ * instruction must apply no clamp or rounding. */
static bool
va_is_add_imm(bi_instr *I, unsigned s)
{
+ /* Guard the src[] access ahead of dynamically allocated sources. */
+ assert(s < I->nr_srcs);
+
 return I->src[s].swizzle == BI_SWIZZLE_H01 &&
 !I->src[s].abs && !I->src[s].neg && !I->clamp && !I->round;
}
static void
va_lower_mov_imm(bi_instr *I)
{
+ assert(I->nr_srcs == 1);
+
if (I->src[0].type == BI_INDEX_CONSTANT) {
I->op = BI_OPCODE_IADD_IMM_I32;
I->index = I->src[0].value;