* we don't know what part of a compound variable is accessed, we allocate
* storage for the entire thing.
*/
-struct add_const_offset_to_base_params {
- nir_builder b;
- nir_variable_mode mode;
-};
static bool
-add_const_offset_to_base_block(nir_block *block, void *closure)
+add_const_offset_to_base_block(nir_block *block, nir_builder *b,
+ nir_variable_mode mode)
{
- struct add_const_offset_to_base_params *params = closure;
- nir_builder *b = &params->b;
-
nir_foreach_instr_safe(block, instr) {
if (instr->type != nir_instr_type_intrinsic)
continue;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
- if ((params->mode == nir_var_shader_in && is_input(intrin)) ||
- (params->mode == nir_var_shader_out && is_output(intrin))) {
+ if ((mode == nir_var_shader_in && is_input(intrin)) ||
+ (mode == nir_var_shader_out && is_output(intrin))) {
nir_src *offset = nir_get_io_offset_src(intrin);
nir_const_value *const_offset = nir_src_as_const_value(*offset);
static void
add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode)
{
- struct add_const_offset_to_base_params params = { .mode = mode };
-
nir_foreach_function(nir, f) {
if (f->impl) {
- nir_builder_init(&params.b, f->impl);
- nir_foreach_block_call(f->impl, add_const_offset_to_base_block, &params);
+ nir_builder b;
+ nir_builder_init(&b, f->impl);
+ nir_foreach_block(block, f->impl) {
+ add_const_offset_to_base_block(block, &b, mode);
+ }
}
}
}
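/* For reference, and not part of this patch: a minimal sketch of the
 * iteration pattern being applied throughout. nir_foreach_block_call() took a
 * callback plus a void *closure, which forced each pass to bundle its
 * arguments into a state struct; the new nir_foreach_block() macro iterates
 * inline, so a per-block helper can take typed parameters directly. The
 * helper lower_block() and its int parameter here are hypothetical.
 */
static bool
lower_block(nir_block *block, int arg)
{
   /* Per-block work would go here; return whether progress was made. */
   (void) block;
   (void) arg;
   return false;
}

static void
lower_impl(nir_function_impl *impl, int arg)
{
   nir_foreach_block(block, impl) {
      lower_block(block, arg);
   }
}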
static bool
-remap_vs_attrs(nir_block *block, void *closure)
+remap_vs_attrs(nir_block *block, GLbitfield64 inputs_read)
{
- GLbitfield64 inputs_read = *((GLbitfield64 *) closure);
-
nir_foreach_instr(block, instr) {
if (instr->type != nir_instr_type_intrinsic)
continue;
}
static bool
-remap_inputs_with_vue_map(nir_block *block, void *closure)
+remap_inputs_with_vue_map(nir_block *block, const struct brw_vue_map *vue_map)
{
- const struct brw_vue_map *vue_map = closure;
-
nir_foreach_instr(block, instr) {
if (instr->type != nir_instr_type_intrinsic)
continue;
return true;
}
-struct remap_patch_urb_offsets_state {
- nir_builder b;
- const struct brw_vue_map *vue_map;
-};
-
static bool
-remap_patch_urb_offsets(nir_block *block, void *closure)
+remap_patch_urb_offsets(nir_block *block, nir_builder *b,
+ const struct brw_vue_map *vue_map)
{
- struct remap_patch_urb_offsets_state *state = closure;
-
nir_foreach_instr_safe(block, instr) {
if (instr->type != nir_instr_type_intrinsic)
continue;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
- gl_shader_stage stage = state->b.shader->stage;
+ gl_shader_stage stage = b->shader->stage;
if ((stage == MESA_SHADER_TESS_CTRL && is_output(intrin)) ||
(stage == MESA_SHADER_TESS_EVAL && is_input(intrin))) {
- int vue_slot = state->vue_map->varying_to_slot[intrin->const_index[0]];
+ int vue_slot = vue_map->varying_to_slot[intrin->const_index[0]];
assert(vue_slot != -1);
intrin->const_index[0] = vue_slot;
nir_const_value *const_vertex = nir_src_as_const_value(*vertex);
if (const_vertex) {
intrin->const_index[0] += const_vertex->u32[0] *
- state->vue_map->num_per_vertex_slots;
+ vue_map->num_per_vertex_slots;
} else {
- state->b.cursor = nir_before_instr(&intrin->instr);
+ b->cursor = nir_before_instr(&intrin->instr);
/* Multiply by the number of per-vertex slots. */
nir_ssa_def *vertex_offset =
- nir_imul(&state->b,
- nir_ssa_for_src(&state->b, *vertex, 1),
- nir_imm_int(&state->b,
- state->vue_map->num_per_vertex_slots));
+ nir_imul(b,
+ nir_ssa_for_src(b, *vertex, 1),
+ nir_imm_int(b,
+ vue_map->num_per_vertex_slots));
/* Add it to the existing offset */
nir_src *offset = nir_get_io_offset_src(intrin);
nir_ssa_def *total_offset =
- nir_iadd(&state->b, vertex_offset,
- nir_ssa_for_src(&state->b, *offset, 1));
+ nir_iadd(b, vertex_offset,
+ nir_ssa_for_src(b, *offset, 1));
nir_instr_rewrite_src(&intrin->instr, offset,
nir_src_for_ssa(total_offset));
nir_foreach_function(nir, function) {
if (function->impl) {
- nir_foreach_block_call(function->impl, remap_vs_attrs, &inputs_read);
+ nir_foreach_block(block, function->impl) {
+ remap_vs_attrs(block, inputs_read);
+ }
}
}
}
nir_foreach_function(nir, function) {
if (function->impl) {
- nir_foreach_block_call(function->impl, remap_inputs_with_vue_map,
- (void *) vue_map);
+ nir_foreach_block(block, function->impl) {
+ remap_inputs_with_vue_map(block, vue_map);
+ }
}
}
}
void
brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue_map)
{
- struct remap_patch_urb_offsets_state state;
- state.vue_map = vue_map;
-
foreach_list_typed(nir_variable, var, node, &nir->inputs) {
var->data.driver_location = var->data.location;
}
nir_foreach_function(nir, function) {
if (function->impl) {
- nir_builder_init(&state.b, function->impl);
- nir_foreach_block_call(function->impl, remap_patch_urb_offsets, &state);
+ nir_builder b;
+ nir_builder_init(&b, function->impl);
+ nir_foreach_block(block, function->impl) {
+ remap_patch_urb_offsets(block, &b, vue_map);
+ }
}
}
}
void
brw_nir_lower_tcs_outputs(nir_shader *nir, const struct brw_vue_map *vue_map)
{
- struct remap_patch_urb_offsets_state state;
- state.vue_map = vue_map;
-
nir_foreach_variable(var, &nir->outputs) {
var->data.driver_location = var->data.location;
}
nir_foreach_function(nir, function) {
if (function->impl) {
- nir_builder_init(&state.b, function->impl);
- nir_foreach_block_call(function->impl, remap_patch_urb_offsets, &state);
+ nir_builder b;
+ nir_builder_init(&b, function->impl);
+ nir_foreach_block(block, function->impl) {
+ remap_patch_urb_offsets(block, &b, vue_map);
+ }
}
}
}
}
static bool
-brw_nir_opt_peephole_ffma_block(nir_block *block, void *void_state)
+brw_nir_opt_peephole_ffma_block(nir_block *block, void *mem_ctx)
{
- struct peephole_ffma_state *state = void_state;
+ bool progress = false;
nir_foreach_instr_safe(block, instr) {
if (instr->type != nir_instr_type_alu)
if (abs) {
for (unsigned i = 0; i < 2; i++) {
- nir_alu_instr *abs = nir_alu_instr_create(state->mem_ctx,
- nir_op_fabs);
+ nir_alu_instr *abs = nir_alu_instr_create(mem_ctx, nir_op_fabs);
abs->src[0].src = nir_src_for_ssa(mul_src[i]);
nir_ssa_dest_init(&abs->instr, &abs->dest.dest,
mul_src[i]->num_components, bit_size, NULL);
}
if (negate) {
- nir_alu_instr *neg = nir_alu_instr_create(state->mem_ctx,
- nir_op_fneg);
+ nir_alu_instr *neg = nir_alu_instr_create(mem_ctx, nir_op_fneg);
neg->src[0].src = nir_src_for_ssa(mul_src[0]);
nir_ssa_dest_init(&neg->instr, &neg->dest.dest,
mul_src[0]->num_components, bit_size, NULL);
mul_src[0] = &neg->dest.dest.ssa;
}
- nir_alu_instr *ffma = nir_alu_instr_create(state->mem_ctx, nir_op_ffma);
+ nir_alu_instr *ffma = nir_alu_instr_create(mem_ctx, nir_op_ffma);
ffma->dest.saturate = add->dest.saturate;
ffma->dest.write_mask = add->dest.write_mask;
assert(list_empty(&add->dest.dest.ssa.uses));
nir_instr_remove(&add->instr);
- state->progress = true;
+ progress = true;
}
- return true;
+ return progress;
}
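/* For reference, and not part of this patch: the bool return changes meaning
 * here. Under nir_foreach_block_call() a callback returned true to keep the
 * block walk going; with the inline nir_foreach_block() macro the helper's
 * return value is free to report progress instead, which is what lets the
 * caller below accumulate it with |= and gate nir_metadata_preserve() on it.
 */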
static bool
brw_nir_opt_peephole_ffma_impl(nir_function_impl *impl)
{
- struct peephole_ffma_state state;
-
- state.mem_ctx = ralloc_parent(impl);
- state.impl = impl;
- state.progress = false;
+ bool progress = false;
+ void *mem_ctx = ralloc_parent(impl);
- nir_foreach_block_call(impl, brw_nir_opt_peephole_ffma_block, &state);
+ nir_foreach_block(block, impl) {
+ progress |= brw_nir_opt_peephole_ffma_block(block, mem_ctx);
+ }
- if (state.progress)
+ if (progress)
nir_metadata_preserve(impl, nir_metadata_block_index |
nir_metadata_dominance);
- return state.progress;
+ return progress;
}
bool