struct brw_nir_compiler_opts opts = {};
brw_preprocess_nir(screen->compiler, nir, &opts);
- NIR_PASS_V(nir, brw_nir_lower_storage_image, devinfo);
+ NIR_PASS_V(nir, brw_nir_lower_storage_image,
+ &(struct brw_nir_lower_storage_image_opts) {
+ .devinfo = devinfo,
+ .lower_loads = true,
+ .lower_stores = true,
+ .lower_atomics = true,
+ .lower_get_size = true,
+ });
NIR_PASS_V(nir, crocus_lower_storage_image_derefs);
nir_sweep(nir);
struct brw_nir_compiler_opts opts = {};
brw_preprocess_nir(screen->compiler, nir, &opts);
- NIR_PASS_V(nir, brw_nir_lower_storage_image, devinfo);
+ NIR_PASS_V(nir, brw_nir_lower_storage_image,
+ &(struct brw_nir_lower_storage_image_opts) {
+ .devinfo = devinfo,
+ .lower_loads = true,
+ .lower_stores = true,
+ .lower_atomics = true,
+ .lower_get_size = true,
+ });
NIR_PASS_V(nir, iris_lower_storage_image_derefs);
nir_sweep(nir);
bool brw_nir_lower_shading_rate_output(nir_shader *nir);
+/* Options for brw_nir_lower_storage_image().
+ *
+ * Each lower_* flag enables lowering of the corresponding class of storage
+ * image intrinsics (loads, stores, atomics, size queries); intrinsics whose
+ * flag is false are left untouched by the pass.  devinfo identifies the
+ * target GPU the lowering is performed for.
+ */
+struct brw_nir_lower_storage_image_opts {
+ const struct intel_device_info *devinfo;
+
+ bool lower_loads;     /* lower nir_intrinsic_image_deref_load */
+ bool lower_stores;    /* lower nir_intrinsic_image_deref_store */
+ bool lower_atomics;   /* lower nir_intrinsic_image_deref_atomic_* */
+ bool lower_get_size;  /* lower nir_intrinsic_image_deref_size */
+};
+
+/* Lower storage image intrinsics selected by @opts; returns true on
+ * progress.  NOTE(review): callers previously passed devinfo directly —
+ * all in-tree call sites now build a full opts struct with every flag set.
+ */
bool brw_nir_lower_storage_image(nir_shader *nir,
- const struct intel_device_info *devinfo);
+ const struct brw_nir_lower_storage_image_opts *opts);
bool brw_nir_lower_mem_access_bit_sizes(nir_shader *shader,
const struct
{
if (instr->type != nir_instr_type_intrinsic)
return false;
- const struct intel_device_info *devinfo = cb_data;
+ const struct brw_nir_lower_storage_image_opts *opts = cb_data;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
switch (intrin->intrinsic) {
case nir_intrinsic_image_deref_load:
- return lower_image_load_instr(b, devinfo, intrin);
+ if (opts->lower_loads)
+ return lower_image_load_instr(b, opts->devinfo, intrin);
+ return false;
case nir_intrinsic_image_deref_store:
- return lower_image_store_instr(b, devinfo, intrin);
+ if (opts->lower_stores)
+ return lower_image_store_instr(b, opts->devinfo, intrin);
+ return false;
case nir_intrinsic_image_deref_atomic_add:
case nir_intrinsic_image_deref_atomic_imin:
case nir_intrinsic_image_deref_atomic_xor:
case nir_intrinsic_image_deref_atomic_exchange:
case nir_intrinsic_image_deref_atomic_comp_swap:
- return lower_image_atomic_instr(b, devinfo, intrin);
+ if (opts->lower_atomics)
+ return lower_image_atomic_instr(b, opts->devinfo, intrin);
+ return false;
case nir_intrinsic_image_deref_size:
- return lower_image_size_instr(b, devinfo, intrin);
+ if (opts->lower_get_size)
+ return lower_image_size_instr(b, opts->devinfo, intrin);
+ return false;
default:
/* Nothing to do */
+/* Walk all instructions in @shader and lower the storage image intrinsics
+ * selected by @opts (see brw_nir_lower_storage_image_opts).  The per-
+ * instruction work is done by brw_nir_lower_storage_image_instr, which
+ * receives @opts as its callback data.  Returns true if any instruction
+ * was changed.
+ */
bool
brw_nir_lower_storage_image(nir_shader *shader,
- const struct intel_device_info *devinfo)
+ const struct brw_nir_lower_storage_image_opts *opts)
{
bool progress = false;
progress |= nir_shader_instructions_pass(shader,
brw_nir_lower_storage_image_instr,
nir_metadata_none,
- (void *)devinfo);
+ (void *)opts);
return progress;
}
nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
- NIR_PASS(_, nir, brw_nir_lower_storage_image, compiler->devinfo);
+ NIR_PASS(_, nir, brw_nir_lower_storage_image,
+ &(struct brw_nir_lower_storage_image_opts) {
+ .devinfo = compiler->devinfo,
+ .lower_loads = true,
+ .lower_stores = true,
+ .lower_atomics = true,
+ .lower_get_size = true,
+ });
NIR_PASS(_, nir, nir_lower_explicit_io, nir_var_mem_global,
nir_address_format_64bit_global);
nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
- NIR_PASS(_, nir, brw_nir_lower_storage_image, compiler->devinfo);
+ NIR_PASS(_, nir, brw_nir_lower_storage_image,
+ &(struct brw_nir_lower_storage_image_opts) {
+ .devinfo = compiler->devinfo,
+ .lower_loads = true,
+ .lower_stores = true,
+ .lower_atomics = true,
+ .lower_get_size = true,
+ });
NIR_PASS(_, nir, nir_lower_explicit_io, nir_var_mem_global,
nir_address_format_64bit_global);