if (nir_lower_io_to_scalar_early(ordered_shaders[i], mask)) {
/* Optimize the new vector code and then remove dead vars */
nir_copy_prop(ordered_shaders[i]);
- nir_opt_shrink_vectors(ordered_shaders[i]);
+ nir_opt_shrink_vectors(ordered_shaders[i], true);
if (ordered_shaders[i]->info.stage != last) {
/* Optimize swizzled movs of load_const for
 * nir_link_opt_varyings's constant propagation
 */
[...]
radv_lower_io(device, nir[i]);
- lower_to_scalar |= nir_opt_shrink_vectors(nir[i]);
+ lower_to_scalar |= nir_opt_shrink_vectors(nir[i], true);
if (lower_to_scalar)
nir_lower_alu_to_scalar(nir[i], NULL, NULL);
}
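
Both radv call sites pass true, keeping image-store shrinking enabled, so these hunks are behavior-preserving. Note that the pass's boolean return still feeds the driver's re-scalarization decision; a minimal sketch of that pattern, with an illustrative shader handle:

   /* Sketch: nir_opt_shrink_vectors() returns true when it shrank
    * something; radv uses that to decide whether ALU ops must be
    * lowered back to scalar afterwards. */
   bool lower_to_scalar = false;
   lower_to_scalar |= nir_opt_shrink_vectors(nir, true);
   if (lower_to_scalar)
      nir_lower_alu_to_scalar(nir, NULL, NULL);
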
NIR_PASS(progress, shader, nir_opt_undef);
- NIR_PASS(progress, shader, nir_opt_shrink_vectors);
+ NIR_PASS(progress, shader, nir_opt_shrink_vectors, true);
if (shader->options->max_unroll_iterations) {
NIR_PASS(progress, shader, nir_opt_loop_unroll, 0);
}
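
Appending the flag works at every NIR_PASS call site because the macro is variadic and forwards its trailing arguments to the pass function, just as nir_opt_loop_unroll already receives its extra argument above. Roughly (a simplified sketch; the real macro in nir.h additionally handles validation and debug printing):

   /* Simplified sketch of NIR_PASS's trailing-argument forwarding. */
   #define NIR_PASS(progress, nir, pass, ...)   \
      do {                                      \
         if (pass(nir, ##__VA_ARGS__))          \
            progress = true;                    \
      } while (0)
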
bool nir_opt_remove_phis(nir_shader *shader);
bool nir_opt_remove_phis_block(nir_block *block);
-bool nir_opt_shrink_vectors(nir_shader *shader);
+bool nir_opt_shrink_vectors(nir_shader *shader, bool shrink_image_store);
bool nir_opt_trivial_continues(nir_shader *shader);
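
For any caller not updated in this patch, the migration is mechanical: true keeps the pre-patch behavior, false newly opts out of touching image stores. For example:

   /* Same behavior as before this patch: */
   nir_opt_shrink_vectors(shader, true);

   /* New: leave image stores untouched, e.g. for a backend whose
    * store lowering expects the full write to be present. */
   nir_opt_shrink_vectors(shader, false);
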
}
static bool
-opt_shrink_vectors_intrinsic(nir_builder *b, nir_intrinsic_instr *instr)
+opt_shrink_vectors_intrinsic(nir_builder *b, nir_intrinsic_instr *instr, bool shrink_image_store)
{
switch (instr->intrinsic) {
case nir_intrinsic_load_uniform:
[...]
case nir_intrinsic_bindless_image_store:
case nir_intrinsic_image_deref_store:
case nir_intrinsic_image_store:
- return opt_shrink_vectors_image_store(b, instr);
+ return shrink_image_store && opt_shrink_vectors_image_store(b, instr);
default:
return false;
}
}
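
Gating the call with shrink_image_store relies on C's short-circuit evaluation: when the flag is false, opt_shrink_vectors_image_store() is never invoked and the intrinsic is reported as unchanged. That helper is not part of this diff; conceptually it trims the stored value down to the component count of the image's declared format. A hedged sketch of the idea (invented name, not the upstream helper; the image_deref_store variant's format lookup is omitted):

   /* Sketch: shrink an image store to the components its format
    * actually has. Assumes the caller positioned b->cursor, as
    * opt_shrink_vectors_instr() does. */
   static bool
   shrink_image_store_sketch(nir_builder *b, nir_intrinsic_instr *instr)
   {
      enum pipe_format format = nir_intrinsic_format(instr);
      if (format == PIPE_FORMAT_NONE)
         return false;

      unsigned comps = util_format_get_nr_components(format);
      if (comps >= instr->num_components)
         return false;

      /* src[3] is the stored value; keep only its low components. */
      nir_ssa_def *data =
         nir_channels(b, instr->src[3].ssa, nir_component_mask(comps));
      nir_instr_rewrite_src(&instr->instr, &instr->src[3],
                            nir_src_for_ssa(data));
      instr->num_components = comps;
      return true;
   }
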
static bool
-opt_shrink_vectors_instr(nir_builder *b, nir_instr *instr)
+opt_shrink_vectors_instr(nir_builder *b, nir_instr *instr, bool shrink_image_store)
{
b->cursor = nir_before_instr(instr);

switch (instr->type) {
case nir_instr_type_alu:
return opt_shrink_vectors_alu(b, nir_instr_as_alu(instr));
case nir_instr_type_intrinsic:
- return opt_shrink_vectors_intrinsic(b, nir_instr_as_intrinsic(instr));
+ return opt_shrink_vectors_intrinsic(b, nir_instr_as_intrinsic(instr), shrink_image_store);
case nir_instr_type_load_const:
return opt_shrink_vectors_load_const(nir_instr_as_load_const(instr));
[...]
}
bool
-nir_opt_shrink_vectors(nir_shader *shader)
+nir_opt_shrink_vectors(nir_shader *shader, bool shrink_image_store)
{
bool progress = false;
[...]
nir_foreach_block(block, function->impl) {
nir_foreach_instr(instr, block) {
- progress |= opt_shrink_vectors_instr(&b, instr);
+ progress |= opt_shrink_vectors_instr(&b, instr, shrink_image_store);
}
}
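
The hunk elides the boilerplate between the progress flag and the block walk. The surrounding code is the standard NIR pass skeleton, roughly (a sketch, not copied from this patch; the metadata flags are the usual conservative choice):

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_builder b;
      nir_builder_init(&b, function->impl);

      nir_foreach_block(block, function->impl) {
         nir_foreach_instr(instr, block) {
            progress |= opt_shrink_vectors_instr(&b, instr, shrink_image_store);
         }
      }

      nir_metadata_preserve(function->impl,
                            nir_metadata_block_index | nir_metadata_dominance);
   }
   return progress;
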
.robust_modes = 0,
};
NIR_PASS(progress, s, nir_opt_load_store_vectorize, &vectorize_opts);
- NIR_PASS(progress, s, nir_opt_shrink_vectors);
+ NIR_PASS(progress, s, nir_opt_shrink_vectors, true);
NIR_PASS(progress, s, nir_opt_trivial_continues);
NIR_PASS(progress, s, nir_opt_vectorize, ntt_should_vectorize_instr, NULL);
NIR_PASS(progress, s, nir_opt_undef);
[...]
NIR_PASS_V(s, nir_lower_vars_to_ssa);
progress |= OPT(s, nir_opt_copy_prop_vars);
- progress |= OPT(s, nir_opt_shrink_vectors);
+ progress |= OPT(s, nir_opt_shrink_vectors, true);
progress |= OPT(s, nir_copy_prop);
progress |= OPT(s, nir_opt_dce);
progress |= OPT(s, nir_opt_cse);
[...]
if (is_scalar) {
OPT(nir_lower_alu_to_scalar, NULL, NULL);
} else {
- OPT(nir_opt_shrink_vectors);
+ OPT(nir_opt_shrink_vectors, true);
}
OPT(nir_copy_prop);
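
Every call site updated in this series passes true, so the change is purely mechanical and preserves behavior. The false path presumably targets a backend whose image-store lowering needs all components written; such a driver would opt out like this (illustrative, not part of this patch):

   NIR_PASS(progress, s, nir_opt_shrink_vectors, false);
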