}
void
-v3d_optimize_nir(struct v3d_compile *c, struct nir_shader *s, bool allow_copies)
+v3d_optimize_nir(struct v3d_compile *c, struct nir_shader *s)
{
bool progress;
unsigned lower_flrp =
        (s->options->lower_flrp16 ? 16 : 0) |
        (s->options->lower_flrp32 ? 32 : 0) |
        (s->options->lower_flrp64 ? 64 : 0);

do {
        progress = false;

        NIR_PASS(progress, s, nir_opt_deref);
        NIR_PASS(progress, s, nir_lower_vars_to_ssa);
-       if (allow_copies) {
+       if (!s->info.var_copies_lowered) {
                /* Only run this pass if nir_lower_var_copies was not called
                 * yet. That would lower away any copy_deref instructions and we
                 * don't want to introduce any more.
                 */
                NIR_PASS(progress, s, nir_opt_find_array_copies);
        }
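/* Why the shader-info flag can replace the caller-passed boolean:
 * nir_lower_var_copies records that it ran by setting
 * shader->info.var_copies_lowered, so v3d_optimize_nir() can decide by
 * itself whether nir_opt_find_array_copies may still safely introduce
 * new copy_deref instructions. A hypothetical caller, sketched for
 * illustration only, behaves the same before and after this change:
 */
static void
example_optimize_around_copy_lowering(struct v3d_compile *c, nir_shader *s)
{
        v3d_optimize_nir(c, s);               /* may run nir_opt_find_array_copies */
        NIR_PASS(_, s, nir_lower_var_copies); /* sets s->info.var_copies_lowered   */
        v3d_optimize_nir(c, s);               /* now skips nir_opt_find_array_copies */
}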
const struct v3d_compiler *v3d_compiler_init(const struct v3d_device_info *devinfo,
uint32_t max_inline_uniform_buffers);
void v3d_compiler_free(const struct v3d_compiler *compiler);
-void v3d_optimize_nir(struct v3d_compile *c, struct nir_shader *s, bool allow_copies);
+void v3d_optimize_nir(struct v3d_compile *c, struct nir_shader *s);
uint64_t *v3d_compile(const struct v3d_compiler *compiler,
struct v3d_key *key,
NIR_PASS(_, c->s, nir_remove_unused_io_vars,
nir_var_shader_out, used_outputs, NULL); /* demotes to globals */
NIR_PASS(_, c->s, nir_lower_global_vars_to_local);
- v3d_optimize_nir(c, c->s, false);
+ v3d_optimize_nir(c, c->s);
NIR_PASS(_, c->s, nir_remove_dead_variables, nir_var_shader_in, NULL);
/* This must go before nir_lower_io */
NIR_PASS(_, c->s, nir_remove_unused_io_vars,
nir_var_shader_out, used_outputs, NULL); /* demotes to globals */
NIR_PASS(_, c->s, nir_lower_global_vars_to_local);
- v3d_optimize_nir(c, c->s, false);
+ v3d_optimize_nir(c, c->s);
NIR_PASS(_, c->s, nir_remove_dead_variables, nir_var_shader_in, NULL);
/* This must go before nir_lower_io */
NIR_PASS(_, c->s, v3d_nir_lower_subgroup_intrinsics, c);
- v3d_optimize_nir(c, c->s, false);
+ v3d_optimize_nir(c, c->s);
/* Do late algebraic optimization to turn add(a, neg(b)) back into
 * subs, then the mandatory cleanup after algebraic. Note that it may
 * produce fnegs, and if so then we need to keep running to squash
 * fneg(fneg(a)).
 */
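/* The fixed-point loop that the comment above describes typically looks
 * like the following; the exact pass list is assumed from the common
 * Mesa pattern and is shown for illustration only:
 */
bool more_late_algebraic = true;
while (more_late_algebraic) {
        more_late_algebraic = false;
        NIR_PASS(more_late_algebraic, c->s, nir_opt_algebraic_late);
        NIR_PASS(_, c->s, nir_opt_constant_folding);
        NIR_PASS(_, c->s, nir_copy_prop);
        NIR_PASS(_, c->s, nir_opt_dce);
        NIR_PASS(_, c->s, nir_opt_cse);
}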
NIR_PASS(_, nir, nir_split_var_copies);
NIR_PASS(_, nir, nir_split_struct_vars, nir_var_function_temp);
- v3d_optimize_nir(NULL, nir, true);
+ v3d_optimize_nir(NULL, nir);
NIR_PASS(_, nir, nir_lower_explicit_io,
         nir_var_mem_push_const,
         nir_address_format_32bit_offset);
NIR_PASS(_, nir, nir_lower_frexp);
/* Get rid of split copies */
- v3d_optimize_nir(NULL, nir, false);
+ v3d_optimize_nir(NULL, nir);
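/* For context on the "split copies" lifecycle: nir_split_var_copies
 * breaks aggregate copy_deref instructions into per-member copies,
 * nir_lower_var_copies then turns those into load/store pairs, and a
 * final v3d_optimize_nir() call copy-propagates and dead-code-eliminates
 * what is left. A condensed, illustrative-only view of that sequence
 * (ordering assumed from the calls in this change):
 */
NIR_PASS(_, nir, nir_split_var_copies); /* aggregate copies -> member copies    */
v3d_optimize_nir(NULL, nir);            /* may add copies via find_array_copies */
NIR_PASS(_, nir, nir_lower_var_copies); /* member copies -> load/store pairs    */
v3d_optimize_nir(NULL, nir);            /* clean up the resulting moves         */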
}
static nir_shader *
nir_lower_io_arrays_to_elements(producer, consumer);
- v3d_optimize_nir(NULL, producer, false);
- v3d_optimize_nir(NULL, consumer, false);
+ v3d_optimize_nir(NULL, producer);
+ v3d_optimize_nir(NULL, consumer);
if (nir_link_opt_varyings(producer, consumer))
- v3d_optimize_nir(NULL, consumer, false);
+ v3d_optimize_nir(NULL, consumer);
NIR_PASS(_, producer, nir_remove_dead_variables, nir_var_shader_out, NULL);
NIR_PASS(_, consumer, nir_remove_dead_variables, nir_var_shader_in, NULL);
NIR_PASS(_, producer, nir_lower_global_vars_to_local);
NIR_PASS(_, consumer, nir_lower_global_vars_to_local);
- v3d_optimize_nir(NULL, producer, false);
- v3d_optimize_nir(NULL, consumer, false);
+ v3d_optimize_nir(NULL, producer);
+ v3d_optimize_nir(NULL, consumer);
/* Optimizations can cause varyings to become unused.
 * nir_compact_varyings() depends on all dead varyings being removed so
 * we need to call nir_remove_dead_variables() before it.
 */
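/* The step this comment prepares for is nir_compact_varyings(), which
 * packs the surviving varying slots. The call below is an assumed
 * continuation shown for illustration only; it is not quoted from this
 * change:
 */
nir_compact_varyings(producer, consumer, true /* default_to_smooth_interp */);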
p_stage->nir = pipeline_stage_get_nir(p_stage, pipeline, cache);
assert(p_stage->nir);
- v3d_optimize_nir(NULL, p_stage->nir, false);
+ v3d_optimize_nir(NULL, p_stage->nir);
pipeline_lower_nir(pipeline, p_stage, pipeline->layout);
lower_cs_shared(p_stage->nir);
NIR_PASS(_, s, nir_lower_load_const_to_scalar);
- v3d_optimize_nir(NULL, s, true);
+ v3d_optimize_nir(NULL, s);
NIR_PASS(_, s, nir_lower_var_copies);
/* Get rid of split copies */
- v3d_optimize_nir(NULL, s, false);
+ v3d_optimize_nir(NULL, s);
NIR_PASS(_, s, nir_remove_dead_variables, nir_var_function_temp, NULL);