Done with sed + ninja clang-format, plus fixing up spacing for common code.
If you are unhappy that I did not manually change the whitespace of your driver,
enable clang-format for it so that the formatting happens automatically.
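
For out-of-tree drivers following along, the change is a mechanical rename of
both the builder helper and the intrinsic enum, with the arguments unchanged.
A minimal sketch of the migration, mirroring the hunks below (the emit_barrier()
handler name here follows the aco hunk; each backend keeps its own handler):

    /* Old spelling: scoped-barrier builder helper. */
    nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
                          .memory_scope = SCOPE_WORKGROUP,
                          .memory_semantics = NIR_MEMORY_ACQ_REL,
                          .memory_modes = nir_var_mem_shared);

    /* New spelling: same barrier, emitted with the renamed helper. */
    nir_barrier(b, .execution_scope = SCOPE_WORKGROUP,
                .memory_scope = SCOPE_WORKGROUP,
                .memory_semantics = NIR_MEMORY_ACQ_REL,
                .memory_modes = nir_var_mem_shared);

    /* Backends match on the renamed intrinsic enum. */
    case nir_intrinsic_barrier: /* was nir_intrinsic_scoped_barrier */
       emit_barrier(ctx, instr);
       break;
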
Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Acked-by: Faith Ekstrand <faith.ekstrand@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/24428>
s.primitive_count);
/* Wait for all stores to finish. */
- nir_scoped_barrier(b, .execution_scope = SCOPE_INVOCATION,
+ nir_barrier(b, .execution_scope = SCOPE_INVOCATION,
.memory_scope = SCOPE_DEVICE,
.memory_semantics = NIR_MEMORY_RELEASE,
.memory_modes = nir_var_shader_out | nir_var_mem_ssbo |
nir_store_shared(b, nir_u2u8(b, surviving_invocations_in_current_wave), lds_offset);
- nir_scoped_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
+ nir_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
.memory_semantics=NIR_MEMORY_ACQ_REL, .memory_modes=nir_var_mem_shared);
nir_ssa_def *packed_counts =
/* pack user edge flag info into arg */
if (s->has_user_edgeflags) {
         /* Workgroup barrier: wait for ES threads to store user edge flags to LDS */
- nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
+ nir_barrier(b, .execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQ_REL,
.memory_modes = nir_var_mem_shared);
* Waves that have no vertices and primitives left can s_endpgm right here.
*/
- nir_scoped_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
+ nir_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
.memory_semantics=NIR_MEMORY_ACQ_REL, .memory_modes=nir_var_mem_shared);
nir_ssa_def *es_survived = nir_ilt(b, invocation_index, num_live_vertices_in_workgroup);
}
nir_pop_if(b, if_es_thread);
- nir_scoped_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
+ nir_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
.memory_semantics=NIR_MEMORY_ACQ_REL, .memory_modes=nir_var_mem_shared);
nir_store_var(b, s->gs_accepted_var, nir_imm_false(b), 0x1u);
}
nir_pop_if(b, if_gs_thread);
- nir_scoped_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
+ nir_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
.memory_semantics=NIR_MEMORY_ACQ_REL, .memory_modes=nir_var_mem_shared);
nir_store_var(b, s->es_accepted_var, nir_imm_false(b), 0x1u);
}
nir_pop_if(b, if_invocation_0);
- nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
+ nir_barrier(b, .execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQ_REL,
.memory_modes = nir_var_mem_shared);
/* Wait for culling to finish using LDS. */
if (need_prim_id_store_shared || has_user_edgeflags) {
- nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
+ nir_barrier(b, .execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQ_REL,
.memory_modes = nir_var_mem_shared);
emit_ngg_nogs_prim_id_store_shared(b, &state);
/* Wait for GS threads to store primitive ID in LDS. */
- nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP, .memory_scope = SCOPE_WORKGROUP,
+ nir_barrier(b, .execution_scope = SCOPE_WORKGROUP, .memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQ_REL, .memory_modes = nir_var_mem_shared);
}
nir_pop_if(b, if_prim_enable);
/* Wait for LDS primflag access done. */
- nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
+ nir_barrier(b, .execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQ_REL,
.memory_modes = nir_var_mem_shared);
}
   /* Workgroup barrier: wait for LDS scratch reads to finish. */
- nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
+ nir_barrier(b, .execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQ_REL,
.memory_modes = nir_var_mem_shared);
ngg_gs_setup_vertex_compaction(b, vertex_live, tid_in_tg, exporter_tid_in_tg, s);
/* Workgroup barrier: wait for all LDS stores to finish. */
- nir_scoped_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
+ nir_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
.memory_semantics=NIR_MEMORY_ACQ_REL, .memory_modes=nir_var_mem_shared);
ngg_gs_export_primitives(b, max_prmcnt, tid_in_tg, exporter_tid_in_tg, out_vtx_primflag_0, s);
nir_builder *b = &builder; /* This is to avoid the & */
/* Workgroup barrier: wait for ES threads */
- nir_scoped_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
+ nir_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
.memory_semantics=NIR_MEMORY_ACQ_REL, .memory_modes=nir_var_mem_shared);
state.lds_addr_gs_out_vtx = nir_load_lds_ngg_gs_out_vertex_base_amd(b);
nir_pop_if(b, if_gs_thread);
/* Workgroup barrier: wait for all GS threads to finish */
- nir_scoped_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
+ nir_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
.memory_semantics=NIR_MEMORY_ACQ_REL, .memory_modes=nir_var_mem_shared);
if (state.streamout_enabled)
}
static nir_ssa_def *
-update_ms_scoped_barrier(nir_builder *b,
+update_ms_barrier(nir_builder *b,
nir_intrinsic_instr *intrin,
lower_ngg_ms_state *s)
{
case nir_intrinsic_load_per_vertex_output:
case nir_intrinsic_load_per_primitive_output:
return ms_load_arrayed_output_intrin(b, intrin, s);
- case nir_intrinsic_scoped_barrier:
- return update_ms_scoped_barrier(b, intrin, s);
+ case nir_intrinsic_barrier:
+ return update_ms_barrier(b, intrin, s);
case nir_intrinsic_load_workgroup_index:
return lower_ms_load_workgroup_index(b, intrin, s);
case nir_intrinsic_set_vertex_and_primitive_count:
intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
intrin->intrinsic == nir_intrinsic_store_per_primitive_output ||
intrin->intrinsic == nir_intrinsic_load_per_primitive_output ||
- intrin->intrinsic == nir_intrinsic_scoped_barrier ||
+ intrin->intrinsic == nir_intrinsic_barrier ||
intrin->intrinsic == nir_intrinsic_load_workgroup_index ||
intrin->intrinsic == nir_intrinsic_set_vertex_and_primitive_count;
}
nir_if *if_wave_0 = nir_push_if(b, nir_ieq_imm(b, wave_id, 0));
{
nir_store_shared(b, workgroup_index, zero, .base = workgroup_index_lds_addr);
- nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
+ nir_barrier(b, .execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQ_REL,
.memory_modes = nir_var_mem_shared);
}
nir_push_else(b, if_wave_0);
{
- nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
+ nir_barrier(b, .execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQ_REL,
.memory_modes = nir_var_mem_shared);
}
nir_pop_if(b, if_elected);
- nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
+ nir_barrier(b, .execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQ_REL,
.memory_modes = nir_var_mem_shared);
}
nir_push_else(b, if_wave_0);
{
- nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
+ nir_barrier(b, .execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQ_REL,
.memory_modes = nir_var_mem_shared);
nir_block *last_block = nir_impl_last_block(b->impl);
b->cursor = nir_after_block(last_block);
- nir_scoped_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
+ nir_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
.memory_semantics=NIR_MEMORY_ACQ_REL, .memory_modes=nir_var_shader_out|nir_var_mem_shared);
nir_ssa_def *num_prm;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
bool is_workgroup_barrier =
- intrin->intrinsic == nir_intrinsic_scoped_barrier &&
+ intrin->intrinsic == nir_intrinsic_barrier &&
nir_intrinsic_execution_scope(intrin) == SCOPE_WORKGROUP;
if (!is_workgroup_barrier)
}
nir_pop_if(b, if_first_in_workgroup);
- nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
+ nir_barrier(b, .execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQ_REL,
.memory_modes = nir_var_shader_out | nir_var_mem_shared);
}
nir_pop_if(b, if_elected_again);
- nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
+ nir_barrier(b, .execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQ_REL,
.memory_modes = nir_var_shader_out | nir_var_mem_shared);
{
nir_loop *loop = nir_push_loop(b);
{
- nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
+ nir_barrier(b, .execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQ_REL,
.memory_modes = nir_var_shader_out | nir_var_mem_shared);
* always a waitcnt_vscnt instruction in order to avoid a race condition
* between payload stores and their loads after mesh shaders launch.
*/
- nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
+ nir_barrier(b, .execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_DEVICE,
.memory_semantics = NIR_MEMORY_ACQ_REL,
.memory_modes = nir_var_mem_task_payload | nir_var_shader_out |
}
static void
-update_hs_scoped_barrier(nir_intrinsic_instr *intrin, lower_tess_io_state *st)
+update_hs_barrier(nir_intrinsic_instr *intrin, lower_tess_io_state *st)
{
/* Output loads and stores are lowered to shared memory access,
* so we have to update the barriers to also reflect this.
} else if (intrin->intrinsic == nir_intrinsic_load_output ||
intrin->intrinsic == nir_intrinsic_load_per_vertex_output) {
return lower_hs_output_load(b, intrin, st);
- } else if (intrin->intrinsic == nir_intrinsic_scoped_barrier) {
- update_hs_scoped_barrier(intrin, st);
+ } else if (intrin->intrinsic == nir_intrinsic_barrier) {
+ update_hs_barrier(intrin, st);
return NIR_LOWER_INSTR_PROGRESS;
} else {
unreachable("intrinsic not supported by lower_hs_output_access");
mesa_scope scope = st->tcs_out_patch_fits_subgroup ?
SCOPE_SUBGROUP : SCOPE_WORKGROUP;
- nir_scoped_barrier(b, .execution_scope = scope, .memory_scope = scope,
+ nir_barrier(b, .execution_scope = scope, .memory_scope = scope,
.memory_semantics = NIR_MEMORY_ACQ_REL, .memory_modes = nir_var_mem_shared);
}
intrin->intrinsic == nir_intrinsic_store_per_vertex_output ||
intrin->intrinsic == nir_intrinsic_load_output ||
intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
- intrin->intrinsic == nir_intrinsic_scoped_barrier;
+ intrin->intrinsic == nir_intrinsic_barrier;
}
static bool
}
void
-emit_scoped_barrier(isel_context* ctx, nir_intrinsic_instr* instr)
+emit_barrier(isel_context* ctx, nir_intrinsic_instr* instr)
{
Builder bld(ctx->program, ctx->block);
case nir_intrinsic_ssbo_atomic_swap: visit_atomic_ssbo(ctx, instr); break;
case nir_intrinsic_load_scratch: visit_load_scratch(ctx, instr); break;
case nir_intrinsic_store_scratch: visit_store_scratch(ctx, instr); break;
- case nir_intrinsic_scoped_barrier: emit_scoped_barrier(ctx, instr); break;
+ case nir_intrinsic_barrier: emit_barrier(ctx, instr); break;
case nir_intrinsic_load_num_workgroups: {
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
if (ctx->options->load_grid_size_from_user_sgpr) {
case nir_intrinsic_demote_if:
emit_demote(ctx, instr);
break;
- case nir_intrinsic_scoped_barrier: {
+ case nir_intrinsic_barrier: {
assert(!(nir_intrinsic_memory_semantics(instr) &
(NIR_MEMORY_MAKE_AVAILABLE | NIR_MEMORY_MAKE_VISIBLE)));
* creating a vmcnt(0) because it expects the L1 cache to keep memory
* operations in-order for the same workgroup. The vmcnt(0) seems
* necessary however. */
- nir_scoped_barrier(&b, .execution_scope = SCOPE_WORKGROUP, .memory_scope = SCOPE_DEVICE,
- .memory_semantics = NIR_MEMORY_ACQ_REL, .memory_modes = nir_var_mem_ssbo);
+ nir_barrier(&b, .execution_scope = SCOPE_WORKGROUP, .memory_scope = SCOPE_DEVICE,
+ .memory_semantics = NIR_MEMORY_ACQ_REL, .memory_modes = nir_var_mem_ssbo);
nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, global_id, nir_ssa_undef(&b, 1, 32), data,
nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D);
* creating a vmcnt(0) because it expects the L1 cache to keep memory
* operations in-order for the same workgroup. The vmcnt(0) seems
* necessary however. */
- nir_scoped_barrier(&b, .execution_scope = SCOPE_WORKGROUP, .memory_scope = SCOPE_DEVICE,
- .memory_semantics = NIR_MEMORY_ACQ_REL, .memory_modes = nir_var_mem_ssbo);
+ nir_barrier(&b, .execution_scope = SCOPE_WORKGROUP, .memory_scope = SCOPE_DEVICE,
+ .memory_semantics = NIR_MEMORY_ACQ_REL, .memory_modes = nir_var_mem_ssbo);
nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, img_coord, nir_ssa_undef(&b, 1, 32), data,
nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D);
return agx_load_compute_dimension(
b, dst, instr, AGX_SR_THREAD_POSITION_IN_THREADGROUP_X);
- case nir_intrinsic_scoped_barrier: {
+ case nir_intrinsic_barrier: {
assert(!b->shader->is_preamble && "invalid");
bool needs_image_barriers = false;
break;
}
- case nir_intrinsic_scoped_barrier:
+ case nir_intrinsic_barrier:
/* Ensure that the TMU operations before the barrier are flushed
* before the ones after the barrier.
*/
case ir_intrinsic_memory_barrier_shared:
case ir_intrinsic_memory_barrier_atomic_counter:
case ir_intrinsic_group_memory_barrier:
- op = nir_intrinsic_scoped_barrier;
+ op = nir_intrinsic_barrier;
break;
case ir_intrinsic_image_size:
op = nir_intrinsic_image_deref_size;
nir_builder_instr_insert(&b, &instr->instr);
break;
}
- case nir_intrinsic_scoped_barrier: {
- /* The nir_intrinsic_scoped_barrier follows the general
+ case nir_intrinsic_barrier: {
+ /* The nir_intrinsic_barrier follows the general
* semantics of SPIR-V memory barriers, so this and other memory
* barriers use the mapping based on GLSL->SPIR-V from
*
nir_visitor::visit(ir_barrier *)
{
if (shader->info.stage == MESA_SHADER_COMPUTE) {
- nir_scoped_barrier(&b, SCOPE_WORKGROUP, SCOPE_WORKGROUP,
- NIR_MEMORY_ACQ_REL, nir_var_mem_shared);
+ nir_barrier(&b, SCOPE_WORKGROUP, SCOPE_WORKGROUP,
+ NIR_MEMORY_ACQ_REL, nir_var_mem_shared);
} else if (shader->info.stage == MESA_SHADER_TESS_CTRL) {
- nir_scoped_barrier(&b, SCOPE_WORKGROUP, SCOPE_WORKGROUP,
- NIR_MEMORY_ACQ_REL, nir_var_shader_out);
+ nir_barrier(&b, SCOPE_WORKGROUP, SCOPE_WORKGROUP,
+ NIR_MEMORY_ACQ_REL, nir_var_shader_out);
}
}
nir_memory_semantics semantics,
nir_variable_mode modes)
{
- nir_scoped_barrier(b, SCOPE_NONE, scope, semantics, modes);
+ nir_barrier(b, SCOPE_NONE, scope, semantics, modes);
}
nir_ssa_def *
break;
- case nir_intrinsic_scoped_barrier:
+ case nir_intrinsic_barrier:
shader->info.uses_control_barrier |=
nir_intrinsic_execution_scope(instr) != SCOPE_NONE;
# OpMemoryBarrier and OpControlBarrier, used to implement Vulkan Memory Model.
# Storage that the barrier applies is represented using NIR variable modes.
# For an OpMemoryBarrier, set EXECUTION_SCOPE to SCOPE_NONE.
-intrinsic("scoped_barrier",
+intrinsic("barrier",
indices=[EXECUTION_SCOPE, MEMORY_SCOPE, MEMORY_SEMANTICS, MEMORY_MODES])
# Shader clock intrinsic with semantics analogous to the clock2x32ARB()
return false;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
- if (intrin->intrinsic == nir_intrinsic_scoped_barrier &&
+ if (intrin->intrinsic == nir_intrinsic_barrier &&
(nir_intrinsic_memory_semantics(intrin) & vis_avail_sem)) {
*cur_modes |= nir_intrinsic_memory_modes(intrin);
nir_ssa_def *zero = nir_imm_int(b, 0);
nir_store_shared(b, zero, zero, .base = s->task_count_shared_addr);
- nir_scoped_barrier(b,
+ nir_barrier(b,
.execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_RELEASE,
*/
b->cursor = nir_after_cf_list(&b->impl->body);
- nir_scoped_barrier(b,
+ nir_barrier(b,
.execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQUIRE,
/* Wait for all previous shared stores to finish.
* This is necessary because we placed the payload in shared memory.
*/
- nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
- .memory_scope = SCOPE_WORKGROUP,
- .memory_semantics = NIR_MEMORY_ACQ_REL,
- .memory_modes = nir_var_mem_shared);
+ nir_barrier(b, .execution_scope = SCOPE_WORKGROUP,
+ .memory_scope = SCOPE_WORKGROUP,
+ .memory_semantics = NIR_MEMORY_ACQ_REL,
+ .memory_modes = nir_var_mem_shared);
/* Payload_size is a size of user-accessible payload, but on some
* hardware (e.g. Intel) payload has a private header, which we have
}
nir_pop_loop(&b, loop);
- nir_scoped_barrier(&b, SCOPE_WORKGROUP, SCOPE_WORKGROUP,
- NIR_MEMORY_ACQ_REL, nir_var_mem_shared);
+ nir_barrier(&b, SCOPE_WORKGROUP, SCOPE_WORKGROUP, NIR_MEMORY_ACQ_REL,
+ nir_var_mem_shared);
nir_metadata_preserve(nir_shader_get_entrypoint(shader), nir_metadata_none);
}
nir_intrinsic_instr *current = nir_instr_as_intrinsic(instr);
- if (current->intrinsic != nir_intrinsic_scoped_barrier) {
+ if (current->intrinsic != nir_intrinsic_barrier) {
prev = NULL;
continue;
}
}
break;
- case nir_intrinsic_scoped_barrier:
+ case nir_intrinsic_barrier:
if (nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_RELEASE) {
combine_stores_with_modes(state,
nir_intrinsic_memory_modes(intrin));
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
switch (intrin->intrinsic) {
- case nir_intrinsic_scoped_barrier:
+ case nir_intrinsic_barrier:
if (nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_ACQUIRE)
written->modes |= nir_intrinsic_memory_modes(intrin);
break;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
switch (intrin->intrinsic) {
- case nir_intrinsic_scoped_barrier:
+ case nir_intrinsic_barrier:
if (debug) dump_instr(instr);
if (nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_ACQUIRE)
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
switch (intrin->intrinsic) {
- case nir_intrinsic_scoped_barrier: {
+ case nir_intrinsic_barrier: {
if (nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_RELEASE) {
clear_unused_for_modes(&unused_writes,
nir_intrinsic_memory_modes(intrin));
acquire = false;
modes = nir_var_all;
break;
- case nir_intrinsic_scoped_barrier:
+ case nir_intrinsic_barrier:
if (nir_intrinsic_memory_scope(intrin) == SCOPE_NONE)
break;
add_write_dep(state, &state->store_shared, n);
break;
- case nir_intrinsic_scoped_barrier: {
+ case nir_intrinsic_barrier: {
const nir_variable_mode modes = nir_intrinsic_memory_modes(instr);
if (modes & nir_var_mem_shared)
TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_barrier)
{
create_load(nir_var_mem_ssbo, 0, 0, 0x1);
- nir_scoped_barrier(b, SCOPE_WORKGROUP, SCOPE_NONE,
+ nir_barrier(b, SCOPE_WORKGROUP, SCOPE_NONE,
(nir_memory_semantics)0, (nir_variable_mode)0);
create_load(nir_var_mem_ssbo, 0, 4, 0x2);
else
nir_mem_scope = vtn_translate_scope(b, mem_scope);
- nir_scoped_barrier(&b->nb, .execution_scope=nir_exec_scope, .memory_scope=nir_mem_scope,
- .memory_semantics=nir_semantics, .memory_modes=modes);
+ nir_barrier(&b->nb, .execution_scope=nir_exec_scope, .memory_scope=nir_mem_scope,
+ .memory_semantics=nir_semantics, .memory_modes=modes);
}
void
if (nir_semantics == 0 || modes == 0)
return;
- nir_scoped_barrier(&b->nb, .memory_scope=vtn_translate_scope(b, scope),
- .memory_semantics=nir_semantics,
- .memory_modes=modes);
+ nir_barrier(&b->nb, .memory_scope=vtn_translate_scope(b, scope),
+ .memory_semantics=nir_semantics,
+ .memory_modes=modes);
}
struct vtn_ssa_value *
get_nir(sizeof(words) / sizeof(words[0]), words);
- nir_intrinsic_instr *intrinsic = find_intrinsic(nir_intrinsic_scoped_barrier, 0);
+ nir_intrinsic_instr *intrinsic = find_intrinsic(nir_intrinsic_barrier, 0);
ASSERT_NE(intrinsic, nullptr);
EXPECT_EQ(nir_intrinsic_memory_semantics(intrinsic), NIR_MEMORY_MAKE_VISIBLE | NIR_MEMORY_ACQUIRE);
get_nir(sizeof(words) / sizeof(words[0]), words);
- nir_intrinsic_instr *intrinsic = find_intrinsic(nir_intrinsic_scoped_barrier, 0);
+ nir_intrinsic_instr *intrinsic = find_intrinsic(nir_intrinsic_barrier, 0);
ASSERT_NE(intrinsic, nullptr);
EXPECT_EQ(nir_intrinsic_memory_semantics(intrinsic), NIR_MEMORY_MAKE_AVAILABLE | NIR_MEMORY_RELEASE);
get_nir(sizeof(words) / sizeof(words[0]), words);
- nir_intrinsic_instr *first = find_intrinsic(nir_intrinsic_scoped_barrier, 0);
- nir_intrinsic_instr *second = find_intrinsic(nir_intrinsic_scoped_barrier, 1);
+ nir_intrinsic_instr *first = find_intrinsic(nir_intrinsic_barrier, 0);
+ nir_intrinsic_instr *second = find_intrinsic(nir_intrinsic_barrier, 1);
ASSERT_NE(first, nullptr);
ASSERT_NE(second, nullptr);
get_nir(sizeof(words) / sizeof(words[0]), words);
- nir_intrinsic_instr *first = find_intrinsic(nir_intrinsic_scoped_barrier, 0);
- nir_intrinsic_instr *second = find_intrinsic(nir_intrinsic_scoped_barrier, 1);
+ nir_intrinsic_instr *first = find_intrinsic(nir_intrinsic_barrier, 0);
+ nir_intrinsic_instr *second = find_intrinsic(nir_intrinsic_barrier, 1);
ASSERT_NE(first, nullptr);
ASSERT_NE(second, nullptr);
get_nir(sizeof(words) / sizeof(words[0]), words);
- nir_intrinsic_instr *intrinsic = find_intrinsic(nir_intrinsic_scoped_barrier, 0);
+ nir_intrinsic_instr *intrinsic = find_intrinsic(nir_intrinsic_barrier, 0);
ASSERT_NE(intrinsic, nullptr);
EXPECT_EQ(nir_intrinsic_memory_semantics(intrinsic), NIR_MEMORY_MAKE_AVAILABLE | NIR_MEMORY_RELEASE);
get_nir(sizeof(words) / sizeof(words[0]), words);
- nir_intrinsic_instr *intrinsic = find_intrinsic(nir_intrinsic_scoped_barrier, 0);
+ nir_intrinsic_instr *intrinsic = find_intrinsic(nir_intrinsic_barrier, 0);
ASSERT_NE(intrinsic, nullptr);
EXPECT_EQ(nir_intrinsic_memory_semantics(intrinsic), NIR_MEMORY_MAKE_VISIBLE | NIR_MEMORY_ACQUIRE);
* The libclc we have uses a __local pointer but clang gives us generic
* pointers. Fortunately, the whole function is just a barrier.
*/
- nir_scoped_barrier(&b->nb, .execution_scope = SCOPE_WORKGROUP,
- .memory_scope = SCOPE_WORKGROUP,
- .memory_semantics = NIR_MEMORY_ACQUIRE |
- NIR_MEMORY_RELEASE,
- .memory_modes = nir_var_mem_shared |
- nir_var_mem_global);
+ nir_barrier(&b->nb, .execution_scope = SCOPE_WORKGROUP,
+ .memory_scope = SCOPE_WORKGROUP,
+ .memory_semantics = NIR_MEMORY_ACQUIRE |
+ NIR_MEMORY_RELEASE,
+ .memory_modes = nir_var_mem_shared |
+ nir_var_mem_global);
break;
}
default:
case nir_intrinsic_bindless_image_atomic_swap:
dst[0] = ctx->funcs->emit_intrinsic_atomic_image(ctx, intr);
break;
- case nir_intrinsic_scoped_barrier:
+ case nir_intrinsic_barrier:
emit_intrinsic_barrier(ctx, intr);
/* note that blk ptr no longer valid, make that obvious: */
b = NULL;
case nir_intrinsic_shared_atomic_swap:
visit_shared_atomic(bld_base, instr, result);
break;
- case nir_intrinsic_scoped_barrier:
+ case nir_intrinsic_barrier:
visit_barrier(bld_base, instr);
break;
case nir_intrinsic_load_kernel_input:
ntt_emit_image_load_store(c, instr);
break;
- case nir_intrinsic_scoped_barrier:
+ case nir_intrinsic_barrier:
ntt_emit_barrier(c, instr);
break;
static void
ttn_barrier(nir_builder *b)
{
- nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP);
+ nir_barrier(b, .execution_scope = SCOPE_WORKGROUP);
}
static void
m_flags.set(sh_writes_memory);
m_flags.set(sh_uses_images);
break;
- case nir_intrinsic_scoped_barrier:
+ case nir_intrinsic_barrier:
m_chain_instr.prepare_mem_barrier |=
(nir_intrinsic_memory_modes(intr) &
(nir_var_mem_ssbo | nir_var_mem_global | nir_var_image) &&
return emit_load_tcs_param_base(intr, 0);
case nir_intrinsic_load_tcs_out_param_base_r600:
return emit_load_tcs_param_base(intr, 16);
- case nir_intrinsic_scoped_barrier:
- return emit_scoped_barrier(intr);
+ case nir_intrinsic_barrier:
+ return emit_barrier(intr);
case nir_intrinsic_shared_atomic:
case nir_intrinsic_shared_atomic_swap:
return emit_atomic_local_shared(intr);
return true;
}
-bool Shader::emit_scoped_barrier(nir_intrinsic_instr *intr)
+bool Shader::emit_barrier(nir_intrinsic_instr *intr)
{
if ((nir_intrinsic_execution_scope(intr) == SCOPE_WORKGROUP)) {
bool emit_group_barrier(nir_intrinsic_instr *intr);
bool emit_shader_clock(nir_intrinsic_instr *instr);
bool emit_wait_ack();
- bool emit_scoped_barrier(nir_intrinsic_instr *instr);
+ bool emit_barrier(nir_intrinsic_instr *instr);
bool emit_load_reg(nir_intrinsic_instr *intr);
bool emit_load_reg_indirect(nir_intrinsic_instr *intr);
bool emit_store_reg(nir_intrinsic_instr *intr);
continue;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
- if (intrin->intrinsic == nir_intrinsic_scoped_barrier &&
+ if (intrin->intrinsic == nir_intrinsic_barrier &&
nir_intrinsic_execution_scope(intrin) >= SCOPE_WORKGROUP) {
/* If we find a barrier in nested control flow put this in the
SpvBuiltInTessCoord, nir_type_float);
break;
- case nir_intrinsic_scoped_barrier:
+ case nir_intrinsic_barrier:
emit_barrier(ctx, intr);
break;
}
static bool
-remove_scoped_barriers_impl(nir_builder *b, nir_instr *instr, void *data)
+remove_barriers_impl(nir_builder *b, nir_instr *instr, void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
- if (intr->intrinsic != nir_intrinsic_scoped_barrier)
+ if (intr->intrinsic != nir_intrinsic_barrier)
return false;
if (data) {
if (nir_intrinsic_memory_scope(intr) == SCOPE_WORKGROUP ||
}
static bool
-remove_scoped_barriers(nir_shader *nir, bool is_compute)
+remove_barriers(nir_shader *nir, bool is_compute)
{
- return nir_shader_instructions_pass(nir, remove_scoped_barriers_impl, nir_metadata_dominance, (void*)is_compute);
+ return nir_shader_instructions_pass(nir, remove_barriers_impl, nir_metadata_dominance, (void*)is_compute);
}
static bool
lvp_shader_lower(struct lvp_device *pdevice, nir_shader *nir, struct lvp_shader *shader, struct lvp_pipeline_layout *layout)
{
if (nir->info.stage != MESA_SHADER_TESS_CTRL)
- NIR_PASS_V(nir, remove_scoped_barriers, nir->info.stage == MESA_SHADER_COMPUTE || nir->info.stage == MESA_SHADER_MESH || nir->info.stage == MESA_SHADER_TASK);
+ NIR_PASS_V(nir, remove_barriers, nir->info.stage == MESA_SHADER_COMPUTE || nir->info.stage == MESA_SHADER_MESH || nir->info.stage == MESA_SHADER_TASK);
const struct nir_lower_sysvals_to_varyings_options sysvals_to_varyings = {
.frag_coord = true,
bld.MOV(retype(dst, invocation_id.type), invocation_id);
break;
- case nir_intrinsic_scoped_barrier:
+ case nir_intrinsic_barrier:
if (nir_intrinsic_memory_scope(instr) != SCOPE_NONE)
nir_emit_intrinsic(bld, instr);
if (nir_intrinsic_execution_scope(instr) == SCOPE_WORKGROUP) {
dest = get_nir_dest(instr->dest);
switch (instr->intrinsic) {
- case nir_intrinsic_scoped_barrier:
+ case nir_intrinsic_barrier:
if (nir_intrinsic_memory_scope(instr) != SCOPE_NONE)
nir_emit_intrinsic(bld, instr);
if (nir_intrinsic_execution_scope(instr) == SCOPE_WORKGROUP) {
break;
}
- case nir_intrinsic_scoped_barrier:
+ case nir_intrinsic_barrier:
case nir_intrinsic_begin_invocation_interlock:
case nir_intrinsic_end_invocation_interlock: {
bool ugm_fence, slm_fence, tgm_fence, urb_fence;
*/
switch (instr->intrinsic) {
- case nir_intrinsic_scoped_barrier: {
+ case nir_intrinsic_barrier: {
/* Note we only care about the memory part of the
* barrier. The execution part will be taken care
* of by the stage specific intrinsic handler functions.
* may start filling MUE before other finished initializing.
*/
if (workgroup_size > dispatch_width) {
- nir_scoped_barrier(&b, SCOPE_WORKGROUP, SCOPE_WORKGROUP,
+ nir_barrier(&b, SCOPE_WORKGROUP, SCOPE_WORKGROUP,
NIR_MEMORY_ACQ_REL, nir_var_shader_out);
}
break;
}
- case nir_intrinsic_scoped_barrier: {
+ case nir_intrinsic_barrier: {
if (nir_intrinsic_memory_scope(instr) == SCOPE_NONE)
break;
const vec4_builder bld =
break;
}
- case nir_intrinsic_scoped_barrier:
+ case nir_intrinsic_barrier:
if (nir_intrinsic_memory_scope(instr) != SCOPE_NONE)
vec4_visitor::nir_emit_intrinsic(instr);
if (nir_intrinsic_execution_scope(instr) == SCOPE_WORKGROUP) {
nir_builder b = nir_builder_at(nir_after_cf_list(&impl->body));
/* wait for all subgroups to finish */
- nir_scoped_barrier(&b, SCOPE_WORKGROUP);
+ nir_barrier(&b, SCOPE_WORKGROUP);
nir_ssa_def *zero = nir_imm_int(&b, 0);
nir_store_deref(b, counter_deref, nir_imm_int(b, 0), 1);
nir_pop_if(b, nif);
- nir_scoped_barrier(b,
+ nir_barrier(b,
.execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQ_REL,
static bool
is_memory_barrier_tcs_patch(const nir_intrinsic_instr *intr)
{
- if (intr->intrinsic == nir_intrinsic_scoped_barrier &&
+ if (intr->intrinsic == nir_intrinsic_barrier &&
nir_intrinsic_memory_modes(intr) & nir_var_shader_out) {
assert(nir_intrinsic_memory_modes(intr) == nir_var_shader_out);
assert(nir_intrinsic_memory_scope(intr) == SCOPE_WORKGROUP);
nir_ssa_def_rewrite_uses(&intr->dest.ssa, state.count);
break;
}
- case nir_intrinsic_scoped_barrier:
+ case nir_intrinsic_barrier:
if (!is_memory_barrier_tcs_patch(intr))
break;
return emit_emit_vertex(ctx, intr);
case nir_intrinsic_end_primitive:
return emit_end_primitive(ctx, intr);
- case nir_intrinsic_scoped_barrier:
+ case nir_intrinsic_barrier:
return emit_barrier(ctx, intr);
case nir_intrinsic_ssbo_atomic:
return emit_ssbo_atomic(ctx, intr);
break;
}
- case nir_intrinsic_scoped_barrier: {
+ case nir_intrinsic_barrier: {
mesa_scope exec_scope = nir_intrinsic_execution_scope(insn);
mesa_scope mem_scope = nir_intrinsic_memory_scope(insn);
nir_variable_mode modes = nir_intrinsic_memory_modes(insn);
bi_emit_store(b, instr, BI_SEG_WLS);
break;
- case nir_intrinsic_scoped_barrier:
+ case nir_intrinsic_barrier:
if (nir_intrinsic_execution_scope(instr) != SCOPE_NONE) {
assert(b->shader->stage != MESA_SHADER_FRAGMENT);
assert(nir_intrinsic_execution_scope(instr) > SCOPE_SUBGROUP &&
emit_special(ctx, instr, 97);
break;
- case nir_intrinsic_scoped_barrier:
+ case nir_intrinsic_barrier:
if (nir_intrinsic_execution_scope(instr) != SCOPE_NONE) {
schedule_barrier(ctx);
emit_control_barrier(ctx);