if (so->shader->compiler->gen >= 6)
progress |= OPT(s, nir_lower_ubo_vec4);
- OPT_V(s, ir3_nir_lower_io_offsets, so->shader->compiler->gpu_id);
+ OPT_V(s, ir3_nir_lower_io_offsets);
if (progress)
ir3_optimize_loop(so->shader->compiler, s);
bool ir3_nir_apply_trig_workarounds(nir_shader *shader);
bool ir3_nir_lower_imul(nir_shader *shader);
bool ir3_nir_lower_tg4_to_tex(nir_shader *shader);
-bool ir3_nir_lower_io_offsets(nir_shader *shader, int gpu_id);
+bool ir3_nir_lower_io_offsets(nir_shader *shader);
bool ir3_nir_lower_load_barycentric_at_sample(nir_shader *shader);
bool ir3_nir_lower_load_barycentric_at_offset(nir_shader *shader);
bool ir3_nir_move_varying_inputs(nir_shader *shader);
}
/*
 * NOTE(review): this span is a diff hunk, not plain C — the '-'/'+' lines
 * are the old/new signatures from a patch that drops the `gpu_id`
 * parameter, which the function no longer reads (the matching call site
 * in lower_io_offsets_func is updated in the same patch).
 *
 * The function body appears elided in this view: only the `progress`
 * accumulator declaration is visible, and a `static bool` function with
 * no `return` would not be the real code.  Do not assume the function is
 * this short — TODO confirm against the full file.
 */
static bool
-lower_io_offsets_block(nir_block *block, nir_builder *b, void *mem_ctx,
- int gpu_id)
+lower_io_offsets_block(nir_block *block, nir_builder *b, void *mem_ctx)
{
bool progress = false;
}
static bool
-lower_io_offsets_func(nir_function_impl *impl, int gpu_id)
+lower_io_offsets_func(nir_function_impl *impl)
{
void *mem_ctx = ralloc_parent(impl);
nir_builder b;
bool progress = false;
nir_foreach_block_safe (block, impl) {
- progress |= lower_io_offsets_block(block, &b, mem_ctx, gpu_id);
+ progress |= lower_io_offsets_block(block, &b, mem_ctx);
}
if (progress) {
}
bool
-ir3_nir_lower_io_offsets(nir_shader *shader, int gpu_id)
+ir3_nir_lower_io_offsets(nir_shader *shader)
{
bool progress = false;
nir_foreach_function (function, shader) {
if (function->impl)
- progress |= lower_io_offsets_func(function->impl, gpu_id);
+ progress |= lower_io_offsets_func(function->impl);
}
return progress;