nir_registers are only supposed to be used temporarily. They may be created by a
producer, but then must be immediately lowered prior to optimizing the produced
shader. They may be created internally by an optimization pass that doesn't want
to deal with phis, but that pass needs to lower them back to phis immediately.
Finally they may be created when going out-of-SSA if a backend chooses, but that
has to happen late.
Regardless, there should be no case where a backend sees a shader that comes in
with nir_registers needing to be lowered. The two frontend producers of
registers (tgsi_to_nir and mesa/st) both call nir_lower_regs_to_ssa to clean up
as they should. Some backends (like Intel) already depend on this behaviour.
There's no need for other backends to call nir_lower_regs_to_ssa too.
Drop the pointless calls as a baby step towards replacing nir_register.
Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/23181>
.allow_fp16 = true,
};
- NIR_PASS_V(nir, nir_lower_regs_to_ssa);
NIR_PASS_V(nir, nir_lower_idiv, &idiv_options);
NIR_PASS_V(nir, nir_lower_frexp);
NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
NIR_PASS_V(s, nir_lower_frexp);
NIR_PASS_V(s, nir_lower_amul, ir3_glsl_type_size);
- OPT_V(s, nir_lower_regs_to_ssa);
OPT_V(s, nir_lower_wrmasks, should_split_wrmask, s);
OPT_V(s, nir_lower_tex, &tex_options);
NIR_PASS_V(s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
type_size, (nir_lower_io_options)0);
- NIR_PASS_V(s, nir_lower_regs_to_ssa);
nir_to_tgsi_lower_txp(s);
NIR_PASS_V(s, nir_to_tgsi_lower_tex);
NIR_PASS_V(s, nir_lower_io, nir_var_shader_in | nir_var_uniform, etna_glsl_type_size,
(nir_lower_io_options)0);
- NIR_PASS_V(s, nir_lower_regs_to_ssa);
NIR_PASS_V(s, nir_lower_vars_to_ssa);
NIR_PASS_V(s, nir_lower_indirect_derefs, nir_var_all, UINT32_MAX);
NIR_PASS_V(s, nir_lower_tex, &(struct nir_lower_tex_options) { .lower_txp = ~0u, .lower_invalid_implicit_lod = true, });
debug_printf("----------------------\n");
}
- OPT_V(s, nir_lower_regs_to_ssa);
OPT_V(s, nir_lower_vars_to_ssa);
OPT_V(s, nir_lower_indirect_derefs, nir_var_shader_in | nir_var_shader_out,
UINT32_MAX);
NIR_PASS_V(s, nir_lower_fragcoord_wtrans);
NIR_PASS_V(s, nir_lower_io,
nir_var_shader_in | nir_var_shader_out, type_size, 0);
- NIR_PASS_V(s, nir_lower_regs_to_ssa);
NIR_PASS_V(s, nir_lower_tex, tex_options);
NIR_PASS_V(s, lima_nir_lower_txp);
sel->nir = tgsi_to_nir(sel->tokens, ctx->screen, true);
/* Lower int64 ops because we have some r600 built-in shaders that use it */
if (nir_options->lower_int64_options) {
- NIR_PASS_V(sel->nir, nir_lower_regs_to_ssa);
NIR_PASS_V(sel->nir, nir_lower_alu_to_scalar, r600_lower_to_scalar_instr_filter, NULL);
NIR_PASS_V(sel->nir, nir_lower_int64);
}
nir_shader *nir = (nir_shader *)shader;
- NIR_PASS_V(nir, nir_lower_regs_to_ssa);
const int nir_lower_flrp_mask = 16 | 32 | 64;
NIR_PASS_V(nir, nir_lower_flrp, nir_lower_flrp_mask, false);
type_size, (nir_lower_io_options)0);
}
- NIR_PASS(_, s, nir_lower_regs_to_ssa);
NIR_PASS(_, s, nir_normalize_cubemap_coords);
NIR_PASS(_, s, nir_lower_load_const_to_scalar);
nir_var_shader_in | nir_var_shader_out | nir_var_uniform,
type_size, (nir_lower_io_options)0);
- NIR_PASS_V(s, nir_lower_regs_to_ssa);
NIR_PASS_V(s, nir_normalize_cubemap_coords);
NIR_PASS_V(s, nir_lower_load_const_to_scalar);
NIR_PASS_V(nir, fixup_io_locations);
NIR_PASS_V(nir, lower_basevertex);
- NIR_PASS_V(nir, nir_lower_regs_to_ssa);
NIR_PASS_V(nir, lower_baseinstance);
NIR_PASS_V(nir, lower_sparse);
NIR_PASS_V(nir, split_bitfields);
nir->info.tess.tcs_vertices_out = vertices_per_patch;
nir_validate_shader(nir, "created");
- NIR_PASS_V(nir, nir_lower_regs_to_ssa);
optimize_nir(nir, NULL);
NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp, NULL);
NIR_PASS_V(nir, nir_convert_from_ssa, true);
/* prepare for IO lowering */
NIR_PASS_V(nir, nir_lower_flrp, lower_flrp, false);
NIR_PASS_V(nir, nir_opt_deref);
- NIR_PASS_V(nir, nir_lower_regs_to_ssa);
NIR_PASS_V(nir, nir_lower_vars_to_ssa);
/* codegen assumes vec4 alignment for memory */
NIR_PASS_V(nir, pan_lower_sample_pos);
NIR_PASS_V(nir, nir_lower_bit_size, bi_lower_bit_size, NULL);
NIR_PASS_V(nir, nir_lower_64bit_phis);
- NIR_PASS_V(nir, nir_lower_regs_to_ssa);
NIR_PASS_V(nir, pan_nir_lower_64bit_intrin);
NIR_PASS_V(nir, pan_lower_helper_invocation);
NIR_PASS_V(nir, nir_lower_int64);
NIR_PASS_V(nir, pan_nir_lower_64bit_intrin);
NIR_PASS_V(nir, nir_lower_frexp);
-
NIR_PASS_V(nir, midgard_nir_lower_global_load);
- NIR_PASS_V(nir, nir_lower_regs_to_ssa);
nir_lower_idiv_options idiv_options = {
.allow_fp16 = true,
};