bool lower_to_scalar = false;
bool lower_pack = false;
- nir_variable_mode robust_modes = (nir_variable_mode)0;
+ nir_load_store_vectorize_options vectorize_opts = {
+ .modes = nir_var_mem_ssbo | nir_var_mem_ubo |
+ nir_var_mem_push_const | nir_var_mem_shared |
+ nir_var_mem_global,
+ .callback = mem_vectorize_callback,
+ .robust_modes = 0,
+ };
if (device->robust_buffer_access) {
- robust_modes = nir_var_mem_ubo |
- nir_var_mem_ssbo |
- nir_var_mem_global |
- nir_var_mem_push_const;
+ vectorize_opts.robust_modes = nir_var_mem_ubo |
+ nir_var_mem_ssbo |
+ nir_var_mem_global |
+ nir_var_mem_push_const;
}
- if (nir_opt_load_store_vectorize(nir[i],
- nir_var_mem_ssbo | nir_var_mem_ubo |
- nir_var_mem_push_const | nir_var_mem_shared |
- nir_var_mem_global,
- mem_vectorize_callback, robust_modes)) {
+ if (nir_opt_load_store_vectorize(nir[i], &vectorize_opts)) {
lower_to_scalar = true;
lower_pack = true;
}
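
(For reference, the driver callback itself is untouched by this series. A minimal sketch of a callback matching the signature the vectorizer expects; the name example_should_vectorize and its accept-up-to-vec4, naturally-aligned policy are illustrative only, not RADV's actual heuristic:

    #include <strings.h> /* ffs() */

    static bool
    example_should_vectorize(unsigned align_mul, unsigned align_offset,
                             unsigned bit_size, unsigned num_components,
                             nir_intrinsic_instr *low, nir_intrinsic_instr *high)
    {
       /* Reject merged accesses wider than a vec4 of the given bit size. */
       if (num_components > 4)
          return false;
       /* The access sits at byte offset align_offset modulo align_mul;
        * derive the guaranteed alignment and require it to be natural. */
       unsigned align = align_offset ? 1u << (ffs(align_offset) - 1) : align_mul;
       return align % (bit_size / 8u) == 0;
    }

)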
unsigned num_components,
nir_intrinsic_instr *low, nir_intrinsic_instr *high);
-bool nir_opt_load_store_vectorize(nir_shader *shader, nir_variable_mode modes,
- nir_should_vectorize_mem_func callback,
- nir_variable_mode robust_modes);
+typedef struct {
+ nir_should_vectorize_mem_func callback;
+ nir_variable_mode modes;
+ nir_variable_mode robust_modes;
+} nir_load_store_vectorize_options;
+
+bool nir_opt_load_store_vectorize(nir_shader *shader,
+                                  const nir_load_store_vectorize_options *options);
void nir_sweep(nir_shader *shader);
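
(With the options struct, a caller can use designated initializers and leave any field it doesn't care about unset; unnamed members are zero-initialized, so new options can be added later without touching every caller. A minimal sketch of a call under the new API, reusing the hypothetical example_should_vectorize callback from above:

    nir_load_store_vectorize_options opts = {
       .modes = nir_var_mem_ssbo | nir_var_mem_ubo,
       .callback = example_should_vectorize,
       /* .robust_modes is implicitly 0 */
    };
    if (nir_opt_load_store_vectorize(shader, &opts)) {
       /* Progress: callers typically schedule scalarize/re-pack
        * cleanups afterwards, as the RADV hunk above does with
        * lower_to_scalar/lower_pack. */
    }

)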
};
struct vectorize_ctx {
- nir_variable_mode modes;
- nir_should_vectorize_mem_func callback;
- nir_variable_mode robust_modes;
+ const nir_load_store_vectorize_options *options;
struct list_head entries[nir_num_variable_modes];
struct hash_table *loads[nir_num_variable_modes];
struct hash_table *stores[nir_num_variable_modes];
if (new_bit_size / common_bit_size > NIR_MAX_VEC_COMPONENTS)
return false;
- if (!ctx->callback(low->align_mul,
- low->align_offset,
- new_bit_size, new_num_components,
- low->intrin, high->intrin))
+ if (!ctx->options->callback(low->align_mul,
+ low->align_offset,
+ new_bit_size, new_num_components,
+ low->intrin, high->intrin))
return false;
if (low->is_store) {
check_for_robustness(struct vectorize_ctx *ctx, struct entry *low)
{
nir_variable_mode mode = get_variable_mode(low);
- if (mode & ctx->robust_modes) {
+ if (mode & ctx->options->robust_modes) {
unsigned low_bit_size = get_bit_size(low);
unsigned low_size = low->intrin->num_components * low_bit_size;
struct entry *low, struct entry *high,
struct entry *first, struct entry *second)
{
- if (!(get_variable_mode(first) & ctx->modes) ||
- !(get_variable_mode(second) & ctx->modes))
+ if (!(get_variable_mode(first) & ctx->options->modes) ||
+ !(get_variable_mode(second) & ctx->options->modes))
return false;
if (check_for_aliasing(ctx, first, second))
nir_variable_mode mode = info->mode;
if (!mode)
mode = nir_src_as_deref(intrin->src[info->deref_src])->modes;
- if (!(mode & aliasing_modes(ctx->modes)))
+ if (!(mode & aliasing_modes(ctx->options->modes)))
continue;
unsigned mode_index = mode_to_index(mode);
}
bool
-nir_opt_load_store_vectorize(nir_shader *shader, nir_variable_mode modes,
- nir_should_vectorize_mem_func callback,
- nir_variable_mode robust_modes)
+nir_opt_load_store_vectorize(nir_shader *shader,
+                             const nir_load_store_vectorize_options *options)
{
bool progress = false;
struct vectorize_ctx *ctx = rzalloc(NULL, struct vectorize_ctx);
- ctx->modes = modes;
- ctx->callback = callback;
- ctx->robust_modes = robust_modes;
+ ctx->options = options;
- nir_shader_index_vars(shader, modes);
+ nir_shader_index_vars(shader, options->modes);
nir_foreach_function(function, shader) {
if (function->impl) {
- if (modes & nir_var_function_temp)
+ if (options->modes & nir_var_function_temp)
nir_function_impl_index_vars(function->impl);
nir_foreach_block(block, function->impl)
{
if (modes & nir_var_mem_shared)
nir_lower_vars_to_explicit_types(b->shader, nir_var_mem_shared, shared_type_info);
- bool progress = nir_opt_load_store_vectorize(b->shader, modes, mem_vectorize_callback, robust_modes);
+
+ nir_load_store_vectorize_options opts = { };
+ opts.callback = mem_vectorize_callback;
+ opts.modes = modes;
+ opts.robust_modes = robust_modes;
+ bool progress = nir_opt_load_store_vectorize(b->shader, &opts);
+
if (progress) {
nir_validate_shader(b->shader, NULL);
if (cse)
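
(The test helper builds the struct with plain member assignments rather than designated initializers, presumably because the vectorizer tests compile as C++, where designated initializers are only standard from C++20 on; the `= { }` zero-initializes every field first. Under C++20 the equivalent would be, with designators in declaration order:

    nir_load_store_vectorize_options opts = {
       .callback = mem_vectorize_callback,
       .modes = modes,
       .robust_modes = robust_modes,
    };

)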
progress |= OPT(s, nir_lower_pack);
progress |= OPT(s, nir_opt_constant_folding);
- progress |= OPT(s, nir_opt_load_store_vectorize, nir_var_mem_ubo,
- ir3_nir_should_vectorize_mem, 0);
+ nir_load_store_vectorize_options vectorize_opts = {
+ .modes = nir_var_mem_ubo,
+ .callback = ir3_nir_should_vectorize_mem,
+ .robust_modes = 0,
+ };
+ progress |= OPT(s, nir_opt_load_store_vectorize, &vectorize_opts);
if (lower_flrp != 0) {
if (OPT(s, nir_lower_flrp,
control_flow_depth == 0 ? ~0 : 8, true, true);
NIR_PASS(progress, s, nir_opt_algebraic);
NIR_PASS(progress, s, nir_opt_constant_folding);
- NIR_PASS(progress, s, nir_opt_load_store_vectorize, nir_var_mem_ubo,
- ntt_should_vectorize_io, 0);
+ nir_load_store_vectorize_options vectorize_opts = {
+ .modes = nir_var_mem_ubo,
+ .callback = ntt_should_vectorize_io,
+ .robust_modes = 0,
+ };
+ NIR_PASS(progress, s, nir_opt_load_store_vectorize, &vectorize_opts);
NIR_PASS(progress, s, nir_opt_shrink_vectors);
NIR_PASS(progress, s, nir_opt_trivial_continues);
NIR_PASS(progress, s, nir_opt_vectorize, ntt_should_vectorize_instr, NULL);
bool progress = false;
if (is_scalar) {
- OPT(nir_opt_load_store_vectorize,
- nir_var_mem_ubo | nir_var_mem_ssbo |
- nir_var_mem_global | nir_var_mem_shared,
- brw_nir_should_vectorize_mem,
- (nir_variable_mode)0);
+ nir_load_store_vectorize_options options = {
+ .modes = nir_var_mem_ubo | nir_var_mem_ssbo |
+ nir_var_mem_global | nir_var_mem_shared,
+ .callback = brw_nir_should_vectorize_mem,
+ .robust_modes = (nir_variable_mode)0,
+ };
+
+ OPT(nir_opt_load_store_vectorize, &options);
}
OPT(brw_nir_lower_mem_access_bit_sizes, devinfo);