ir3_put_dst(ctx, &tex->dest);
}
+/* phi instructions are left partially constructed. We don't resolve
+ * their srcs until the end of the shader, since (eg. loops) one of
+ * the phi's srcs might be defined after the phi due to back edges in
+ * the CFG.
+ */
+static void
+emit_phi(struct ir3_context *ctx, nir_phi_instr *nphi)
+{
+ struct ir3_instruction *phi, **dst;
+
+ /* NOTE: phi's should be lowered to scalar at this point */
+ compile_assert(ctx, nphi->dest.ssa.num_components == 1);
+
+ dst = ir3_get_dst(ctx, &nphi->dest, 1);
+
+ /* Reserve one dst reg plus one src slot per NIR phi src; the srcs
+  * themselves are filled in later by resolve_phis().
+  */
+ phi = ir3_instr_create(ctx->block, OPC_META_PHI,
+ 1 + exec_list_length(&nphi->srcs));
+ __ssa_dst(phi);
+ /* Stash the NIR phi so resolve_phis() can match srcs to predecessors */
+ phi->phi.nphi = nphi;
+
+ dst[0] = phi;
+
+ ir3_put_dst(ctx, &nphi->dest);
+}
+
+static struct ir3_block *get_block(struct ir3_context *ctx, const nir_block *nblock);
+
+/* Fill in the srcs of the partially-constructed phis emitted by
+ * emit_phi(), once every block has been emitted (so srcs defined after
+ * the phi via loop back-edges now exist). Srcs are appended in
+ * predecessor order, so phi->regs[1 + i] corresponds to
+ * block->predecessors[i].
+ */
+static void
+resolve_phis(struct ir3_context *ctx, struct ir3_block *block)
+{
+ foreach_instr (phi, &block->instr_list) {
+ /* phis are always at the start of a block, so we can stop at the
+  * first non-phi instruction:
+  */
+ if (phi->opc != OPC_META_PHI)
+ break;
+
+ nir_phi_instr *nphi = phi->phi.nphi;
+
+ for (unsigned i = 0; i < block->predecessors_count; i++) {
+ struct ir3_block *pred = block->predecessors[i];
+ /* Find the NIR src whose predecessor block maps to this ir3
+  * predecessor. NOTE(review): assumes NIR guarantees exactly one
+  * phi src per predecessor, so each iteration appends one reg.
+  */
+ nir_foreach_phi_src(nsrc, nphi) {
+ if (get_block(ctx, nsrc->pred) == pred) {
+ if (nsrc->src.ssa->parent_instr->type == nir_instr_type_ssa_undef) {
+ /* Create an ir3 undef */
+ ir3_reg_create(phi, INVALID_REG, phi->regs[0]->flags);
+ } else {
+ struct ir3_instruction *src = ir3_get_src(ctx, &nsrc->src)[0];
+ __ssa_src(phi, src, 0);
+ }
+ break;
+ }
+ }
+ }
+ }
+}
+
static void
emit_jump(struct ir3_context *ctx, nir_jump_instr *jump)
{
emit_jump(ctx, nir_instr_as_jump(instr));
break;
case nir_instr_type_phi:
- /* we have converted phi webs to regs in NIR by now */
- ir3_context_error(ctx, "Unexpected NIR instruction type: %d\n", instr->type);
+ emit_phi(ctx, nir_instr_as_phi(instr));
break;
case nir_instr_type_call:
case nir_instr_type_parallel_copy:
}
setup_predecessors(ctx->ir);
+ foreach_block (block, &ctx->ir->block_list) {
+ resolve_phis(ctx, block);
+ }
}
static void
static void
sched_node_add_deps(struct ir3_instruction *instr)
{
+ /* There's nothing to do for phi nodes, since they always go first. And
+ * phi nodes can reference sources later in the same block, so handling
+ * sources is not only unnecessary but could cause problems.
+ */
+ if (instr->opc == OPC_META_PHI)
+ return;
+
/* Since foreach_ssa_src() already handles false-dep's we can construct
* the DAG easily in a single pass.
*/
ctx->remaining_tex++;
}
- /* First schedule all meta:input instructions, followed by
- * tex-prefetch. We want all of the instructions that load
- * values into registers before the shader starts to go
- * before any other instructions. But in particular we
- * want inputs to come before prefetches. This is because
- * a FS's bary_ij input may not actually be live in the
- * shader, but it should not be scheduled on top of any
- * other input (but can be overwritten by a tex prefetch)
+ /* First schedule all meta:input and meta:phi instructions, followed by
+ * tex-prefetch. We want all of the instructions that load values into
+ * registers before the shader starts to go before any other instructions.
+ * But in particular we want inputs to come before prefetches. This is
+ * because a FS's bary_ij input may not actually be live in the shader,
+ * but it should not be scheduled on top of any other input (but can be
+ * overwritten by a tex prefetch)
+ *
+ * Note: Because the first block cannot have predecessors, meta:input and
+ * meta:phi cannot exist in the same block.
*/
foreach_instr_safe (instr, &ctx->unscheduled_list)
- if (instr->opc == OPC_META_INPUT)
+ if (instr->opc == OPC_META_INPUT || instr->opc == OPC_META_PHI)
schedule(ctx, instr);
foreach_instr_safe (instr, &ctx->unscheduled_list)
validate_assert(ctx, reg_class_flags(src->regs[0]) == reg_class_flags(reg));
}
+/* phi sources are logically read at the end of the predecessor basic block,
+ * and we have to validate them then in order to correctly validate that the
+ * use comes after the definition for loop phis.
+ */
+static void
+validate_phi_src(struct ir3_validate_ctx *ctx, struct ir3_block *block, struct ir3_block *pred)
+{
+ /* The phi src slot for this predecessor: phi->regs[1 + pred_idx]
+  * corresponds to block->predecessors[pred_idx].
+  */
+ unsigned pred_idx = ir3_block_get_pred_index(block, pred);
+
+ foreach_instr (phi, &block->instr_list) {
+ /* phis must be first in the block, so stop at the first non-phi */
+ if (phi->opc != OPC_META_PHI)
+ break;
+
+ ctx->current_instr = phi;
+ /* one dst plus one src per predecessor */
+ validate_assert(ctx, phi->regs_count == block->predecessors_count + 1);
+ validate_src(ctx, phi->regs[1 + pred_idx]);
+ }
+}
+
+/* Validate a phi at its own location: record its def and check it writes
+ * a GPR. Its srcs are deliberately NOT validated here — they are checked
+ * by validate_phi_src() at the end of each predecessor block, where they
+ * are logically read.
+ */
+static void
+validate_phi(struct ir3_validate_ctx *ctx, struct ir3_instruction *phi)
+{
+ _mesa_set_add(ctx->defs, phi);
+ validate_assert(ctx, writes_gpr(phi));
+}
+
#define validate_reg_size(ctx, reg, type) \
validate_assert(ctx, type_size(type) == (((reg)->flags & IR3_REG_HALF) ? 16 : 32))
ctx->defs = _mesa_pointer_set_create(ctx);
foreach_block (block, &ir->block_list) {
+ /* We require that the first block does not have any predecessors,
+ * which allows us to assume that phi nodes and meta:input's do not
+ * appear in the same basic block.
+ */
+ validate_assert(ctx,
+ block != ir3_start_block(ir) || block->predecessors_count == 0);
+
+ struct ir3_instruction *prev = NULL;
foreach_instr (instr, &block->instr_list) {
ctx->current_instr = instr;
- validate_instr(ctx, instr);
+ if (instr->opc == OPC_META_PHI) {
+ /* phis must be the first in the block */
+ validate_assert(ctx, prev == NULL || prev->opc == OPC_META_PHI);
+ validate_phi(ctx, instr);
+ } else {
+ validate_instr(ctx, instr);
+ }
+ prev = instr;
+ }
+
+ for (unsigned i = 0; i < 2; i++) {
+ if (block->successors[i])
+ validate_phi_src(ctx, block->successors[i], block);
}
}