void
brw_nir_optimize(nir_shader *nir, const struct brw_compiler *compiler,
- bool is_scalar, bool allow_copies)
+ bool is_scalar)
{
bool progress;
unsigned lower_flrp =
   (nir->options->lower_flrp16 ? 16 : 0) |
   (nir->options->lower_flrp32 ? 32 : 0) |
   (nir->options->lower_flrp64 ? 64 : 0);
if (OPT(nir_opt_memcpy))
OPT(nir_split_var_copies);
OPT(nir_lower_vars_to_ssa);
- if (allow_copies) {
- /* Only run this pass in the first call to brw_nir_optimize. Later
- * calls assume that we've lowered away any copy_deref instructions
- * and we don't want to introduce any more.
+ if (!nir->info.var_copies_lowered) {
+ /* Only run this pass if nir_lower_var_copies has not been called
+ * yet. That pass lowers away all copy_deref instructions, and we
+ * don't want to introduce any more.
*/
OPT(nir_opt_find_array_copies);
}
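/* Where the flag comes from: a minimal sketch, assuming the natural
 * approach of setting it inside nir_lower_var_copies itself. The
 * setting site is not part of this diff, and the helper name below is
 * illustrative only:
 *
 *    bool
 *    nir_lower_var_copies(nir_shader *shader)
 *    {
 *       bool progress = false;
 *       nir_foreach_function_impl(impl, shader)
 *          progress |= lower_var_copies_impl(impl);  // hypothetical helper
 *       shader->info.var_copies_lowered = true;
 *       return progress;
 *    }
 *
 * Once the flag lives on nir_shader_info, every later brw_nir_optimize
 * call sees it and skips nir_opt_find_array_copies on its own, so
 * callers no longer have to thread an allow_copies parameter through.
 */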
OPT(nir_split_var_copies);
OPT(nir_split_struct_vars, nir_var_function_temp);
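/* First optimize call in brw_preprocess_nir: nothing has lowered var
 * copies yet, so info.var_copies_lowered is still false and
 * nir_opt_find_array_copies still runs, matching the old
 * allow_copies=true argument removed below.
 */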
- brw_nir_optimize(nir, compiler, is_scalar, true);
+ brw_nir_optimize(nir, compiler, is_scalar);
OPT(nir_lower_doubles, opts->softfp64, nir->options->lower_doubles_options);
if (OPT(nir_lower_int64)) {
nir_lower_direct_array_deref_of_vec_load);
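/* By this point var copies are assumed to have been lowered earlier in
 * preprocessing, so info.var_copies_lowered is set and the optimize
 * loop skips nir_opt_find_array_copies, matching the old
 * allow_copies=false.
 */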
/* Get rid of split copies */
- brw_nir_optimize(nir, compiler, is_scalar, false);
+ brw_nir_optimize(nir, compiler, is_scalar);
}
void
if (p_is_scalar && c_is_scalar) {
NIR_PASS(_, producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
NIR_PASS(_, consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
- brw_nir_optimize(producer, compiler, p_is_scalar, false);
- brw_nir_optimize(consumer, compiler, c_is_scalar, false);
+ brw_nir_optimize(producer, compiler, p_is_scalar);
+ brw_nir_optimize(consumer, compiler, c_is_scalar);
}
if (nir_link_opt_varyings(producer, consumer))
- brw_nir_optimize(consumer, compiler, c_is_scalar, false);
+ brw_nir_optimize(consumer, compiler, c_is_scalar);
NIR_PASS(_, producer, nir_remove_dead_variables, nir_var_shader_out, NULL);
NIR_PASS(_, consumer, nir_remove_dead_variables, nir_var_shader_in, NULL);
NIR_PASS(_, consumer, nir_lower_indirect_derefs,
brw_nir_no_indirect_mask(compiler, consumer->info.stage),
UINT32_MAX);
- brw_nir_optimize(producer, compiler, p_is_scalar, false);
- brw_nir_optimize(consumer, compiler, c_is_scalar, false);
+ brw_nir_optimize(producer, compiler, p_is_scalar);
+ brw_nir_optimize(consumer, compiler, c_is_scalar);
}
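/* All of the link-time calls above previously passed
 * allow_copies=false. They keep that behavior only because both
 * shaders have already been through brw_preprocess_nir, which is
 * assumed to have lowered their var copies and set the flag by the
 * time linking runs.
 */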
NIR_PASS(_, producer, nir_lower_io_to_vector, nir_var_shader_out);
if (gl_shader_stage_can_set_fragment_shading_rate(nir->info.stage))
NIR_PASS(_, nir, brw_nir_lower_shading_rate_output);
- brw_nir_optimize(nir, compiler, is_scalar, false);
+ brw_nir_optimize(nir, compiler, is_scalar);
if (is_scalar && nir_shader_has_local_variables(nir)) {
OPT(nir_lower_vars_to_explicit_types, nir_var_function_temp,
glsl_get_natural_size_align_bytes);
OPT(nir_lower_explicit_io, nir_var_function_temp,
nir_address_format_32bit_offset);
- brw_nir_optimize(nir, compiler, is_scalar, false);
+ brw_nir_optimize(nir, compiler, is_scalar);
}
brw_vectorize_lower_mem_access(nir, compiler, is_scalar,
robust_buffer_access);
if (OPT(nir_lower_int64))
- brw_nir_optimize(nir, compiler, is_scalar, false);
+ brw_nir_optimize(nir, compiler, is_scalar);
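/* nir_lower_int64 expands 64-bit ALU ops into sequences of 32-bit
 * instructions, so the full optimize loop is re-run whenever it makes
 * progress; with the flag stored on the shader, no extra parameter is
 * needed at these call sites.
 */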
if (devinfo->ver >= 6) {
/* Try to fuse multiply-adds */
OPT(nir_lower_subgroups, &subgroups_options);
if (OPT(nir_lower_int64))
- brw_nir_optimize(nir, compiler, is_scalar, false);
+ brw_nir_optimize(nir, compiler, is_scalar);
}
/* Clean up LCSSA phis */
OPT(brw_nir_limit_trig_input_range_workaround);
if (progress)
- brw_nir_optimize(nir, compiler, is_scalar, false);
+ brw_nir_optimize(nir, compiler, is_scalar);
}
enum brw_conditional_mod