void mir_flip(midgard_instruction *ins);
void mir_compute_temp_count(compiler_context *ctx);
-void mir_set_offset(compiler_context *ctx, midgard_instruction *ins, nir_src *offset, bool is_shared);
+/* Load/store segment selectors, OR'd into load_store.arg_1 to pick which
+ * address space a ld/st instruction targets (see mir_set_offset).
+ * NOTE(review): values presumed to match the Midgard ISA arg_1 encoding;
+ * confirm against the ISA documentation. */
+#define LDST_GLOBAL 0x3E
+#define LDST_SHARED 0x2E
+#define LDST_SCRATCH 0x2A
+
+void mir_set_offset(compiler_context *ctx, midgard_instruction *ins, nir_src *offset, unsigned seg);
/* 'Intrinsic' move for aliasing */
}
/* Wires up the address sources of a load/store instruction: 'seg' is one of
 * the LDST_* segment selectors OR'd into load_store.arg_1 when no base
 * address register is matched. (Body shown here is an elided diff hunk —
 * surrounding context lines are missing from this view.) */
void
-mir_set_offset(compiler_context *ctx, midgard_instruction *ins, nir_src *offset, bool is_shared)
+mir_set_offset(compiler_context *ctx, midgard_instruction *ins, nir_src *offset, unsigned seg)
{
        for(unsigned i = 0; i < 16; ++i) {
                ins->swizzle[1][i] = 0;
        bool force_sext = (nir_src_bit_size(*offset) < 64);
        /* Register offsets (non-SSA): no address matching possible, just tag
         * the segment and take the offset as src[2] directly. */
        if (!offset->is_ssa) {
-                ins->load_store.arg_1 |= is_shared ? 0x6E : 0x7E;
+                ins->load_store.arg_1 |= seg;
                ins->src[2] = nir_src_index(ctx, offset);
                ins->src_types[2] = nir_type_uint | nir_src_bit_size(*offset);
                return;
        }
-        struct mir_address match = mir_match_offset(offset->ssa, !is_shared);
+        /* Only global addressing leaves the first (base) slot free for
+         * matching — NOTE(review): presumably because shared/scratch use an
+         * implicit base; confirm against mir_match_offset. */
+        bool first_free = (seg == LDST_GLOBAL);
+
+        struct mir_address match = mir_match_offset(offset->ssa, first_free);
        /* Matched base address A goes in src[1]; otherwise fall back to the
         * segment selector in arg_1. */
        if (match.A.def) {
                ins->src[1] = nir_ssa_index(match.A.def);
                ins->swizzle[1][0] = match.A.comp;
                ins->src_types[1] = nir_type_uint | match.A.def->bit_size;
        } else
-                ins->load_store.arg_1 |= is_shared ? 0x6E : 0x7E;
+                ins->load_store.arg_1 |= seg;
        if (match.B.def) {
                ins->src[2] = nir_ssa_index(match.B.def);
                bool is_read,
                unsigned srcdest,
                nir_src *offset,
-                bool is_shared)
+                /* LDST_* segment selector forwarded to mir_set_offset */
+                unsigned seg)
{
        /* TODO: types */
        else
                ins = m_st_int4(srcdest, 0);
-        mir_set_offset(ctx, &ins, offset, is_shared);
+        mir_set_offset(ctx, &ins, offset, seg);
        mir_set_intr_mask(instr, &ins, is_read);
        /* Set a valid swizzle for masked out components */
        /* NOTE(review): this hunk still uses the magic 0x6E instead of the
         * new LDST_SHARED (0x2E) selector introduced by this change —
         * confirm whether this function was intentionally left on the old
         * encoding or was missed in the conversion. */
        if (is_shared)
                ins.load_store.arg_1 |= 0x6E;
        } else {
-                mir_set_offset(ctx, &ins, src_offset, is_shared);
+                mir_set_offset(ctx, &ins, src_offset, is_shared ? LDST_SHARED : LDST_GLOBAL);
        }
        mir_set_intr_mask(&instr->instr, &ins, true);
        /* UBO indices are shifted by one: index 0 is reserved (presumably for
         * push constants / sysvals — confirm against emit_ubo_read). */
        uint32_t uindex = nir_src_as_uint(index) + 1;
        emit_ubo_read(ctx, &instr->instr, reg, offset, indirect_offset, 0, uindex);
        } else if (is_global || is_shared) {
-                emit_global(ctx, &instr->instr, true, reg, src_offset, is_shared);
+                /* NOTE(review): under the visible (is_global || is_shared)
+                 * guard the LDST_SCRATCH arm is unreachable — verify a
+                 * scratch case is handled elsewhere or the guard extended. */
+                unsigned seg = is_global ? LDST_GLOBAL : (is_shared ? LDST_SHARED : LDST_SCRATCH);
+                emit_global(ctx, &instr->instr, true, reg, src_offset, seg);
        } else if (ctx->stage == MESA_SHADER_FRAGMENT && !ctx->is_blend) {
                emit_varying_read(ctx, reg, offset, nr_comp, component, indirect_offset, t | nir_dest_bit_size(instr->dest), is_flat);
        } else if (ctx->is_blend) {
        reg = nir_src_index(ctx, &instr->src[0]);
        emit_explicit_constant(ctx, reg, reg);
-        emit_global(ctx, &instr->instr, false, reg, &instr->src[1], instr->intrinsic == nir_intrinsic_store_shared);
+        /* Pick the LDST segment for the store. The original left 'seg'
+         * uninitialized for any intrinsic other than store_global /
+         * store_shared (e.g. nir_intrinsic_store_scratch, whose selector
+         * this change introduces), which is undefined behavior when read
+         * below. Fall back to LDST_SCRATCH so 'seg' is always assigned. */
+        unsigned seg;
+        if (instr->intrinsic == nir_intrinsic_store_global)
+                seg = LDST_GLOBAL;
+        else if (instr->intrinsic == nir_intrinsic_store_shared)
+                seg = LDST_SHARED;
+        else
+                seg = LDST_SCRATCH;
+
+        emit_global(ctx, &instr->instr, false, reg, &instr->src[1], seg);
break;
case nir_intrinsic_load_ssbo_address: