}
}
+/**
+ * Scan a NIR shader for image atomic intrinsics.
+ *
+ * Walks every block of every function implementation and returns true as
+ * soon as a (non-deref) image atomic intrinsic is found; returns false if
+ * none exist.  Deref-based image atomics are expected to have been rewritten
+ * already (see the unreachable() below), so this is meant to run after
+ * iris_lower_storage_image_derefs.
+ */
+static bool
+iris_uses_image_atomic(const nir_shader *shader)
+{
+ nir_foreach_function(function, shader) {
+ /* Skip function declarations that have no body. */
+ if (function->impl == NULL)
+ continue;
+
+ nir_foreach_block(block, function->impl) {
+ nir_foreach_instr(instr, block) {
+ if (instr->type != nir_instr_type_intrinsic)
+ continue;
+
+ nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+ switch (intrin->intrinsic) {
+ /* Deref-based forms must already have been lowered to the
+ * index-based forms below; hitting one is a pass-ordering bug.
+ */
+ case nir_intrinsic_image_deref_atomic_add:
+ case nir_intrinsic_image_deref_atomic_imin:
+ case nir_intrinsic_image_deref_atomic_umin:
+ case nir_intrinsic_image_deref_atomic_imax:
+ case nir_intrinsic_image_deref_atomic_umax:
+ case nir_intrinsic_image_deref_atomic_and:
+ case nir_intrinsic_image_deref_atomic_or:
+ case nir_intrinsic_image_deref_atomic_xor:
+ case nir_intrinsic_image_deref_atomic_exchange:
+ case nir_intrinsic_image_deref_atomic_comp_swap:
+ unreachable("Should have been lowered in "
+ "iris_lower_storage_image_derefs");
+
+ /* Any lowered image atomic means the shader uses atomics. */
+ case nir_intrinsic_image_atomic_add:
+ case nir_intrinsic_image_atomic_imin:
+ case nir_intrinsic_image_atomic_umin:
+ case nir_intrinsic_image_atomic_imax:
+ case nir_intrinsic_image_atomic_umax:
+ case nir_intrinsic_image_atomic_and:
+ case nir_intrinsic_image_atomic_or:
+ case nir_intrinsic_image_atomic_xor:
+ case nir_intrinsic_image_atomic_exchange:
+ case nir_intrinsic_image_atomic_comp_swap:
+ return true;
+
+ /* NOTE(review): float image atomics (e.g. image_atomic_fadd)
+ * are not listed — confirm they cannot reach this point or
+ * add them here.
+ */
+ default:
+ break;
+ }
+ }
+ }
+ }
+
+ return false;
+}
+
/**
* Undo nir_lower_passthrough_edgeflags but keep the inputs_read flag.
*/
brw_preprocess_nir(screen->compiler, nir, NULL);
- NIR_PASS_V(nir, brw_nir_lower_storage_image, devinfo,
- &ish->uses_atomic_load_store);
+ NIR_PASS_V(nir, brw_nir_lower_storage_image, devinfo);
NIR_PASS_V(nir, iris_lower_storage_image_derefs);
nir_sweep(nir);
+ ish->uses_atomic_load_store = iris_uses_image_atomic(nir);
+
ish->program_id = get_new_program_id(screen);
ish->nir = nir;
if (so_info) {
bool brw_nir_lower_scoped_barriers(nir_shader *nir);
bool brw_nir_lower_storage_image(nir_shader *nir,
- const struct intel_device_info *devinfo,
- bool *uses_atomic_load_store);
+ const struct intel_device_info *devinfo);
void brw_nir_rewrite_image_intrinsic(nir_intrinsic_instr *intrin,
nir_ssa_def *index);
void brw_nir_rewrite_bindless_image_intrinsic(nir_intrinsic_instr *intrin,
bool
brw_nir_lower_storage_image(nir_shader *shader,
- const struct intel_device_info *devinfo,
- bool *uses_atomic_load_store)
+ const struct intel_device_info *devinfo)
{
bool progress = false;
case nir_intrinsic_image_deref_atomic_xor:
case nir_intrinsic_image_deref_atomic_exchange:
case nir_intrinsic_image_deref_atomic_comp_swap:
- if (uses_atomic_load_store)
- *uses_atomic_load_store = true;
if (lower_image_atomic_instr(&b, devinfo, intrin))
impl_progress = true;
break;
nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
- NIR_PASS_V(nir, brw_nir_lower_storage_image, compiler->devinfo, NULL);
+ NIR_PASS_V(nir, brw_nir_lower_storage_image, compiler->devinfo);
NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_global,
nir_address_format_64bit_global);
BITSET_COPY(prog->info.textures_used, prog->nir->info.textures_used);
BITSET_COPY(prog->info.textures_used_by_txf, prog->nir->info.textures_used_by_txf);
- NIR_PASS_V(prog->nir, brw_nir_lower_storage_image, devinfo, NULL);
+ NIR_PASS_V(prog->nir, brw_nir_lower_storage_image, devinfo);
if (prog->nir->info.stage == MESA_SHADER_COMPUTE &&
shader_prog->data->spirv) {