else
ish->needs_edge_flag = false;
- brw_preprocess_nir(screen->compiler, nir, NULL);
+ struct brw_nir_compiler_opts opts = {};
+ brw_preprocess_nir(screen->compiler, nir, &opts);
NIR_PASS_V(nir, brw_nir_lower_storage_image, devinfo);
NIR_PASS_V(nir, crocus_lower_storage_image_derefs);
NIR_PASS_V(nir, iris_fix_edge_flags);
- brw_preprocess_nir(screen->compiler, nir, NULL);
+ struct brw_nir_compiler_opts opts = {};
+ brw_preprocess_nir(screen->compiler, nir, &opts);
NIR_PASS_V(nir, brw_nir_lower_storage_image, devinfo);
NIR_PASS_V(nir, iris_lower_storage_image_derefs);
wm_prog_data->base.nr_params = 0;
wm_prog_data->base.param = NULL;
- brw_preprocess_nir(compiler, nir, NULL);
+ struct brw_nir_compiler_opts opts = {};
+ brw_preprocess_nir(compiler, nir, &opts);
nir_remove_dead_variables(nir, nir_var_shader_in, NULL);
nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
nir->options = compiler->nir_options[MESA_SHADER_VERTEX];
- brw_preprocess_nir(compiler, nir, NULL);
+ struct brw_nir_compiler_opts opts = {};
+ brw_preprocess_nir(compiler, nir, &opts);
nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
vs_prog_data->inputs_read = nir->info.inputs_read;
memset(cs_prog_data, 0, sizeof(*cs_prog_data));
- brw_preprocess_nir(compiler, nir, NULL);
+ struct brw_nir_compiler_opts opts = {};
+ brw_preprocess_nir(compiler, nir, &opts);
nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
NIR_PASS_V(nir, nir_lower_io, nir_var_uniform, type_size_scalar_bytes,
nir_var_mem_shared | nir_var_mem_global,
glsl_get_cl_type_size_align);
- brw_preprocess_nir(compiler, nir, NULL);
+ struct brw_nir_compiler_opts opts = {};
+ brw_preprocess_nir(compiler, nir, &opts);
int max_arg_idx = -1;
nir_foreach_uniform_variable(var, nir) {
*/
void
brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir,
- const nir_shader *softfp64)
+ const struct brw_nir_compiler_opts *opts)
{
const struct intel_device_info *devinfo = compiler->devinfo;
UNUSED bool progress; /* Written by OPT */
!(devinfo->ver >= 10 || devinfo->platform == INTEL_PLATFORM_KBL))
OPT(brw_nir_apply_trig_workarounds);
- if (devinfo->ver >= 12)
+ /* This workaround exists for performance reasons. Since it requires not
+ * setting RENDER_SURFACE_STATE::SurfaceArray when the array length is 1,
+ * we're losing the HW robustness feature in that case.
+ *
+ * So when robust image access is enabled, just avoid the workaround.
+ */
+ if (devinfo->ver >= 12 && !opts->robust_image_access)
OPT(brw_nir_clamp_image_1d_2d_array_sizes);
const nir_lower_tex_options tex_options = {
brw_nir_optimize(nir, compiler, is_scalar, true);
- OPT(nir_lower_doubles, softfp64, nir->options->lower_doubles_options);
+ OPT(nir_lower_doubles, opts->softfp64, nir->options->lower_doubles_options);
if (OPT(nir_lower_int64)) {
OPT(nir_opt_algebraic);
- OPT(nir_lower_doubles, softfp64, nir->options->lower_doubles_options);
+ OPT(nir_lower_doubles, opts->softfp64,
+ nir->options->lower_doubles_options);
}
OPT(nir_lower_bit_size, lower_bit_size_callback, (void *)compiler);
nir_validate_shader(nir, "in brw_nir_create_passthrough_tcs");
- brw_preprocess_nir(compiler, nir, NULL);
+ struct brw_nir_compiler_opts opts = {};
+ brw_preprocess_nir(compiler, nir, &opts);
return nir;
}
void brw_nir_analyze_boolean_resolves(nir_shader *nir);
+struct brw_nir_compiler_opts {
+ /* Soft floating point implementation shader */
+ const nir_shader *softfp64;
+
+ /* Whether robust image access is enabled */
+ bool robust_image_access;
+};
+
void brw_preprocess_nir(const struct brw_compiler *compiler,
nir_shader *nir,
- const nir_shader *softfp64);
+ const struct brw_nir_compiler_opts *opts);
void
brw_nir_link_shaders(const struct brw_compiler *compiler,
nir_shader *nir = b.shader;
nir->info.name = ralloc_strdup(nir, "RT: TraceRay trampoline");
nir_validate_shader(nir, "in brw_nir_create_raygen_trampoline");
- brw_preprocess_nir(compiler, nir, NULL);
+
+ struct brw_nir_compiler_opts opts = {};
+ brw_preprocess_nir(compiler, nir, &opts);
NIR_PASS_V(nir, brw_nir_lower_rt_intrinsics, devinfo);
/* Vulkan uses the separate-shader linking model */
nir->info.separate_shader = true;
- brw_preprocess_nir(compiler, nir, device->fp64_nir);
+ struct brw_nir_compiler_opts opts = {
+ .softfp64 = device->fp64_nir,
+ };
+ brw_preprocess_nir(compiler, nir, &opts);
if (nir->info.stage == MESA_SHADER_MESH && !nir->info.mesh.nv) {
bool progress = false;
/* Vulkan uses the separate-shader linking model */
nir->info.separate_shader = true;
- brw_preprocess_nir(compiler, nir, NULL);
+ struct brw_nir_compiler_opts opts = {};
+
+ brw_preprocess_nir(compiler, nir, &opts);
return nir;
}