nir: Rename scoped_barrier -> barrier
authorAlyssa Rosenzweig <alyssa@rosenzweig.io>
Fri, 28 Jul 2023 19:08:00 +0000 (15:08 -0400)
committerMarge Bot <emma+marge@anholt.net>
Tue, 1 Aug 2023 23:18:29 +0000 (23:18 +0000)
sed + ninja clang-format + fix up spacing for common code.

If you are unhappy that I did not manually change the whitespace of your driver,
you need to enable clang-format for it so that the formatting happens
automatically.

Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Acked-by: Faith Ekstrand <faith.ekstrand@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/24428>

47 files changed:
src/amd/common/ac_nir.c
src/amd/common/ac_nir_lower_ngg.c
src/amd/common/ac_nir_lower_taskmesh_io_to_mem.c
src/amd/common/ac_nir_lower_tess_io_to_mem.c
src/amd/compiler/aco_instruction_selection.cpp
src/amd/llvm/ac_nir_to_llvm.c
src/amd/vulkan/meta/radv_meta_decompress.c
src/amd/vulkan/meta/radv_meta_fast_clear.c
src/asahi/compiler/agx_compile.c
src/broadcom/compiler/nir_to_vir.c
src/compiler/glsl/glsl_to_nir.cpp
src/compiler/nir/nir_builder.h
src/compiler/nir/nir_gather_info.c
src/compiler/nir/nir_intrinsics.py
src/compiler/nir/nir_lower_memory_model.c
src/compiler/nir/nir_lower_task_shader.c
src/compiler/nir/nir_lower_variable_initializers.c
src/compiler/nir/nir_opt_barriers.c
src/compiler/nir/nir_opt_combine_stores.c
src/compiler/nir/nir_opt_copy_prop_vars.c
src/compiler/nir/nir_opt_dead_write_vars.c
src/compiler/nir/nir_opt_load_store_vectorize.c
src/compiler/nir/nir_schedule.c
src/compiler/nir/tests/load_store_vectorizer_tests.cpp
src/compiler/spirv/spirv_to_nir.c
src/compiler/spirv/tests/avail_vis.cpp
src/compiler/spirv/vtn_opencl.c
src/freedreno/ir3/ir3_compiler_nir.c
src/gallium/auxiliary/gallivm/lp_bld_nir.c
src/gallium/auxiliary/nir/nir_to_tgsi.c
src/gallium/auxiliary/nir/tgsi_to_nir.c
src/gallium/drivers/r600/sfn/sfn_shader.cpp
src/gallium/drivers/r600/sfn/sfn_shader.h
src/gallium/drivers/radeonsi/si_shader_info.c
src/gallium/drivers/zink/nir_to_spirv/nir_to_spirv.c
src/gallium/frontends/lavapipe/lvp_pipeline.c
src/intel/compiler/brw_fs_nir.cpp
src/intel/compiler/brw_mesh.cpp
src/intel/compiler/brw_vec4_nir.cpp
src/intel/compiler/brw_vec4_tcs.cpp
src/intel/vulkan/anv_mesh_perprim_wa.c
src/microsoft/compiler/dxil_nir.c
src/microsoft/compiler/dxil_nir_tess.c
src/microsoft/compiler/nir_to_dxil.c
src/nouveau/codegen/nv50_ir_from_nir.cpp
src/panfrost/compiler/bifrost_compile.c
src/panfrost/midgard/midgard_compile.c

index 0df5d71..bf58a79 100644 (file)
@@ -1179,7 +1179,7 @@ ac_nir_lower_legacy_gs(nir_shader *nir,
                                           s.primitive_count);
 
    /* Wait for all stores to finish. */
-   nir_scoped_barrier(b, .execution_scope = SCOPE_INVOCATION,
+   nir_barrier(b, .execution_scope = SCOPE_INVOCATION,
                       .memory_scope = SCOPE_DEVICE,
                       .memory_semantics = NIR_MEMORY_RELEASE,
                       .memory_modes = nir_var_shader_out | nir_var_mem_ssbo |
index 1f9ce6a..98673a2 100644 (file)
@@ -358,7 +358,7 @@ repack_invocations_in_workgroup(nir_builder *b, nir_ssa_def *input_bool,
 
    nir_store_shared(b, nir_u2u8(b, surviving_invocations_in_current_wave), lds_offset);
 
-   nir_scoped_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
+   nir_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
                          .memory_semantics=NIR_MEMORY_ACQ_REL, .memory_modes=nir_var_mem_shared);
 
    nir_ssa_def *packed_counts =
@@ -564,7 +564,7 @@ emit_ngg_nogs_prim_export(nir_builder *b, lower_ngg_nogs_state *s, nir_ssa_def *
       /* pack user edge flag info into arg */
       if (s->has_user_edgeflags) {
          /* Workgroup barrier: wait for ES threads store user edge flags to LDS */
-         nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
+         nir_barrier(b, .execution_scope = SCOPE_WORKGROUP,
                             .memory_scope = SCOPE_WORKGROUP,
                             .memory_semantics = NIR_MEMORY_ACQ_REL,
                             .memory_modes = nir_var_mem_shared);
@@ -992,7 +992,7 @@ compact_vertices_after_culling(nir_builder *b,
     * Waves that have no vertices and primitives left can s_endpgm right here.
     */
 
-   nir_scoped_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
+   nir_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
                          .memory_semantics=NIR_MEMORY_ACQ_REL, .memory_modes=nir_var_mem_shared);
 
    nir_ssa_def *es_survived = nir_ilt(b, invocation_index, num_live_vertices_in_workgroup);
@@ -1538,7 +1538,7 @@ add_deferred_attribute_culling(nir_builder *b, nir_cf_list *original_extracted_c
       }
       nir_pop_if(b, if_es_thread);
 
-      nir_scoped_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
+      nir_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
                             .memory_semantics=NIR_MEMORY_ACQ_REL, .memory_modes=nir_var_mem_shared);
 
       nir_store_var(b, s->gs_accepted_var, nir_imm_false(b), 0x1u);
@@ -1591,7 +1591,7 @@ add_deferred_attribute_culling(nir_builder *b, nir_cf_list *original_extracted_c
       }
       nir_pop_if(b, if_gs_thread);
 
-      nir_scoped_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
+      nir_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
                             .memory_semantics=NIR_MEMORY_ACQ_REL, .memory_modes=nir_var_mem_shared);
 
       nir_store_var(b, s->es_accepted_var, nir_imm_false(b), 0x1u);
@@ -1931,7 +1931,7 @@ ngg_build_streamout_buffer_info(nir_builder *b,
    }
    nir_pop_if(b, if_invocation_0);
 
-   nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
+   nir_barrier(b, .execution_scope = SCOPE_WORKGROUP,
                       .memory_scope = SCOPE_WORKGROUP,
                       .memory_semantics = NIR_MEMORY_ACQ_REL,
                       .memory_modes = nir_var_mem_shared);
@@ -2409,7 +2409,7 @@ ac_nir_lower_ngg_nogs(nir_shader *shader, const ac_nir_lower_ngg_options *option
 
       /* Wait for culling to finish using LDS. */
       if (need_prim_id_store_shared || has_user_edgeflags) {
-         nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
+         nir_barrier(b, .execution_scope = SCOPE_WORKGROUP,
                                .memory_scope = SCOPE_WORKGROUP,
                                .memory_semantics = NIR_MEMORY_ACQ_REL,
                                .memory_modes = nir_var_mem_shared);
@@ -2428,7 +2428,7 @@ ac_nir_lower_ngg_nogs(nir_shader *shader, const ac_nir_lower_ngg_options *option
       emit_ngg_nogs_prim_id_store_shared(b, &state);
 
       /* Wait for GS threads to store primitive ID in LDS. */
-      nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP, .memory_scope = SCOPE_WORKGROUP,
+      nir_barrier(b, .execution_scope = SCOPE_WORKGROUP, .memory_scope = SCOPE_WORKGROUP,
                             .memory_semantics = NIR_MEMORY_ACQ_REL, .memory_modes = nir_var_mem_shared);
    }
 
@@ -3182,7 +3182,7 @@ ngg_gs_cull_primitive(nir_builder *b, nir_ssa_def *tid_in_tg, nir_ssa_def *max_v
    nir_pop_if(b, if_prim_enable);
 
    /* Wait for LDS primflag access done. */
-   nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
+   nir_barrier(b, .execution_scope = SCOPE_WORKGROUP,
                          .memory_scope = SCOPE_WORKGROUP,
                          .memory_semantics = NIR_MEMORY_ACQ_REL,
                          .memory_modes = nir_var_mem_shared);
@@ -3276,7 +3276,7 @@ ngg_gs_build_streamout(nir_builder *b, lower_ngg_gs_state *s)
    }
 
    /* Workgroup barrier: wait for LDS scratch reads finish. */
-   nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
+   nir_barrier(b, .execution_scope = SCOPE_WORKGROUP,
                       .memory_scope = SCOPE_WORKGROUP,
                       .memory_semantics = NIR_MEMORY_ACQ_REL,
                       .memory_modes = nir_var_mem_shared);
@@ -3391,7 +3391,7 @@ ngg_gs_finale(nir_builder *b, lower_ngg_gs_state *s)
    ngg_gs_setup_vertex_compaction(b, vertex_live, tid_in_tg, exporter_tid_in_tg, s);
 
    /* Workgroup barrier: wait for all LDS stores to finish. */
-   nir_scoped_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
+   nir_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
                         .memory_semantics=NIR_MEMORY_ACQ_REL, .memory_modes=nir_var_mem_shared);
 
    ngg_gs_export_primitives(b, max_prmcnt, tid_in_tg, exporter_tid_in_tg, out_vtx_primflag_0, s);
@@ -3438,7 +3438,7 @@ ac_nir_lower_ngg_gs(nir_shader *shader, const ac_nir_lower_ngg_options *options)
    nir_builder *b = &builder; /* This is to avoid the & */
 
    /* Workgroup barrier: wait for ES threads */
-   nir_scoped_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
+   nir_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
                          .memory_semantics=NIR_MEMORY_ACQ_REL, .memory_modes=nir_var_mem_shared);
 
    state.lds_addr_gs_out_vtx = nir_load_lds_ngg_gs_out_vertex_base_amd(b);
@@ -3452,7 +3452,7 @@ ac_nir_lower_ngg_gs(nir_shader *shader, const ac_nir_lower_ngg_options *options)
    nir_pop_if(b, if_gs_thread);
 
    /* Workgroup barrier: wait for all GS threads to finish */
-   nir_scoped_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
+   nir_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
                          .memory_semantics=NIR_MEMORY_ACQ_REL, .memory_modes=nir_var_mem_shared);
 
    if (state.streamout_enabled)
@@ -3952,7 +3952,7 @@ lower_ms_set_vertex_and_primitive_count(nir_builder *b,
 }
 
 static nir_ssa_def *
-update_ms_scoped_barrier(nir_builder *b,
+update_ms_barrier(nir_builder *b,
                          nir_intrinsic_instr *intrin,
                          lower_ngg_ms_state *s)
 {
@@ -3988,8 +3988,8 @@ lower_ms_intrinsic(nir_builder *b, nir_instr *instr, void *state)
    case nir_intrinsic_load_per_vertex_output:
    case nir_intrinsic_load_per_primitive_output:
       return ms_load_arrayed_output_intrin(b, intrin, s);
-   case nir_intrinsic_scoped_barrier:
-      return update_ms_scoped_barrier(b, intrin, s);
+   case nir_intrinsic_barrier:
+      return update_ms_barrier(b, intrin, s);
    case nir_intrinsic_load_workgroup_index:
       return lower_ms_load_workgroup_index(b, intrin, s);
    case nir_intrinsic_set_vertex_and_primitive_count:
@@ -4013,7 +4013,7 @@ filter_ms_intrinsic(const nir_instr *instr,
           intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
           intrin->intrinsic == nir_intrinsic_store_per_primitive_output ||
           intrin->intrinsic == nir_intrinsic_load_per_primitive_output ||
-          intrin->intrinsic == nir_intrinsic_scoped_barrier ||
+          intrin->intrinsic == nir_intrinsic_barrier ||
           intrin->intrinsic == nir_intrinsic_load_workgroup_index ||
           intrin->intrinsic == nir_intrinsic_set_vertex_and_primitive_count;
 }
@@ -4108,14 +4108,14 @@ ms_emit_legacy_workgroup_index(nir_builder *b, lower_ngg_ms_state *s)
       nir_if *if_wave_0 = nir_push_if(b, nir_ieq_imm(b, wave_id, 0));
       {
          nir_store_shared(b, workgroup_index, zero, .base = workgroup_index_lds_addr);
-         nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
+         nir_barrier(b, .execution_scope = SCOPE_WORKGROUP,
                                .memory_scope = SCOPE_WORKGROUP,
                                .memory_semantics = NIR_MEMORY_ACQ_REL,
                                .memory_modes = nir_var_mem_shared);
       }
       nir_push_else(b, if_wave_0);
       {
-         nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
+         nir_barrier(b, .execution_scope = SCOPE_WORKGROUP,
                                .memory_scope = SCOPE_WORKGROUP,
                                .memory_semantics = NIR_MEMORY_ACQ_REL,
                                .memory_modes = nir_var_mem_shared);
@@ -4174,7 +4174,7 @@ set_ms_final_output_counts(nir_builder *b,
       }
       nir_pop_if(b, if_elected);
 
-      nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
+      nir_barrier(b, .execution_scope = SCOPE_WORKGROUP,
                             .memory_scope = SCOPE_WORKGROUP,
                             .memory_semantics = NIR_MEMORY_ACQ_REL,
                             .memory_modes = nir_var_mem_shared);
@@ -4183,7 +4183,7 @@ set_ms_final_output_counts(nir_builder *b,
    }
    nir_push_else(b, if_wave_0);
    {
-      nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
+      nir_barrier(b, .execution_scope = SCOPE_WORKGROUP,
                             .memory_scope = SCOPE_WORKGROUP,
                             .memory_semantics = NIR_MEMORY_ACQ_REL,
                             .memory_modes = nir_var_mem_shared);
@@ -4305,7 +4305,7 @@ emit_ms_finale(nir_builder *b, lower_ngg_ms_state *s)
    nir_block *last_block = nir_impl_last_block(b->impl);
    b->cursor = nir_after_block(last_block);
 
-   nir_scoped_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
+   nir_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
                          .memory_semantics=NIR_MEMORY_ACQ_REL, .memory_modes=nir_var_shader_out|nir_var_mem_shared);
 
    nir_ssa_def *num_prm;
@@ -4461,7 +4461,7 @@ handle_smaller_ms_api_workgroup(nir_builder *b,
 
             nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
             bool is_workgroup_barrier =
-               intrin->intrinsic == nir_intrinsic_scoped_barrier &&
+               intrin->intrinsic == nir_intrinsic_barrier &&
                nir_intrinsic_execution_scope(intrin) == SCOPE_WORKGROUP;
 
             if (!is_workgroup_barrier)
@@ -4500,7 +4500,7 @@ handle_smaller_ms_api_workgroup(nir_builder *b,
       }
       nir_pop_if(b, if_first_in_workgroup);
 
-      nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
+      nir_barrier(b, .execution_scope = SCOPE_WORKGROUP,
                             .memory_scope = SCOPE_WORKGROUP,
                             .memory_semantics = NIR_MEMORY_ACQ_REL,
                             .memory_modes = nir_var_shader_out | nir_var_mem_shared);
@@ -4522,7 +4522,7 @@ handle_smaller_ms_api_workgroup(nir_builder *b,
          }
          nir_pop_if(b, if_elected_again);
 
-         nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
+         nir_barrier(b, .execution_scope = SCOPE_WORKGROUP,
                                .memory_scope = SCOPE_WORKGROUP,
                                .memory_semantics = NIR_MEMORY_ACQ_REL,
                                .memory_modes = nir_var_shader_out | nir_var_mem_shared);
@@ -4545,7 +4545,7 @@ handle_smaller_ms_api_workgroup(nir_builder *b,
          {
             nir_loop *loop = nir_push_loop(b);
             {
-               nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
+               nir_barrier(b, .execution_scope = SCOPE_WORKGROUP,
                                      .memory_scope = SCOPE_WORKGROUP,
                                      .memory_semantics = NIR_MEMORY_ACQ_REL,
                                      .memory_modes = nir_var_shader_out | nir_var_mem_shared);
index bb6e0a5..2fa083d 100644 (file)
@@ -153,7 +153,7 @@ lower_task_launch_mesh_workgroups(nir_builder *b,
     * always a waitcnt_vscnt instruction in order to avoid a race condition
     * between payload stores and their loads after mesh shaders launch.
     */
-   nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
+   nir_barrier(b, .execution_scope = SCOPE_WORKGROUP,
                          .memory_scope = SCOPE_DEVICE,
                          .memory_semantics = NIR_MEMORY_ACQ_REL,
                          .memory_modes = nir_var_mem_task_payload | nir_var_shader_out |
index 8035af4..eb79653 100644 (file)
@@ -484,7 +484,7 @@ lower_hs_output_load(nir_builder *b,
 }
 
 static void
-update_hs_scoped_barrier(nir_intrinsic_instr *intrin, lower_tess_io_state *st)
+update_hs_barrier(nir_intrinsic_instr *intrin, lower_tess_io_state *st)
 {
    /* Output loads and stores are lowered to shared memory access,
     * so we have to update the barriers to also reflect this.
@@ -519,8 +519,8 @@ lower_hs_output_access(nir_builder *b,
    } else if (intrin->intrinsic == nir_intrinsic_load_output ||
               intrin->intrinsic == nir_intrinsic_load_per_vertex_output) {
       return lower_hs_output_load(b, intrin, st);
-   } else if (intrin->intrinsic == nir_intrinsic_scoped_barrier) {
-      update_hs_scoped_barrier(intrin, st);
+   } else if (intrin->intrinsic == nir_intrinsic_barrier) {
+      update_hs_barrier(intrin, st);
       return NIR_LOWER_INSTR_PROGRESS;
    } else {
       unreachable("intrinsic not supported by lower_hs_output_access");
@@ -567,7 +567,7 @@ hs_emit_write_tess_factors(nir_shader *shader,
       mesa_scope scope = st->tcs_out_patch_fits_subgroup ?
                         SCOPE_SUBGROUP : SCOPE_WORKGROUP;
 
-      nir_scoped_barrier(b, .execution_scope = scope, .memory_scope = scope,
+      nir_barrier(b, .execution_scope = scope, .memory_scope = scope,
                          .memory_semantics = NIR_MEMORY_ACQ_REL, .memory_modes = nir_var_mem_shared);
    }
 
@@ -722,7 +722,7 @@ filter_hs_output_access(const nir_instr *instr,
           intrin->intrinsic == nir_intrinsic_store_per_vertex_output ||
           intrin->intrinsic == nir_intrinsic_load_output ||
           intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
-          intrin->intrinsic == nir_intrinsic_scoped_barrier;
+          intrin->intrinsic == nir_intrinsic_barrier;
 }
 
 static bool
index 838f6a4..6080f7b 100644 (file)
@@ -7117,7 +7117,7 @@ translate_nir_scope(mesa_scope scope)
 }
 
 void
-emit_scoped_barrier(isel_context* ctx, nir_intrinsic_instr* instr)
+emit_barrier(isel_context* ctx, nir_intrinsic_instr* instr)
 {
    Builder bld(ctx->program, ctx->block);
 
@@ -8126,7 +8126,7 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
    case nir_intrinsic_ssbo_atomic_swap: visit_atomic_ssbo(ctx, instr); break;
    case nir_intrinsic_load_scratch: visit_load_scratch(ctx, instr); break;
    case nir_intrinsic_store_scratch: visit_store_scratch(ctx, instr); break;
-   case nir_intrinsic_scoped_barrier: emit_scoped_barrier(ctx, instr); break;
+   case nir_intrinsic_barrier: emit_barrier(ctx, instr); break;
    case nir_intrinsic_load_num_workgroups: {
       Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
       if (ctx->options->load_grid_size_from_user_sgpr) {
index 03b74cb..5233e46 100644 (file)
@@ -3257,7 +3257,7 @@ static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *ins
    case nir_intrinsic_demote_if:
       emit_demote(ctx, instr);
       break;
-   case nir_intrinsic_scoped_barrier: {
+   case nir_intrinsic_barrier: {
       assert(!(nir_intrinsic_memory_semantics(instr) &
                (NIR_MEMORY_MAKE_AVAILABLE | NIR_MEMORY_MAKE_VISIBLE)));
 
index df9e8d8..0a3bbe1 100644 (file)
@@ -66,8 +66,8 @@ build_expand_depth_stencil_compute_shader(struct radv_device *dev)
     * creating a vmcnt(0) because it expects the L1 cache to keep memory
     * operations in-order for the same workgroup. The vmcnt(0) seems
     * necessary however. */
-   nir_scoped_barrier(&b, .execution_scope = SCOPE_WORKGROUP, .memory_scope = SCOPE_DEVICE,
-                      .memory_semantics = NIR_MEMORY_ACQ_REL, .memory_modes = nir_var_mem_ssbo);
+   nir_barrier(&b, .execution_scope = SCOPE_WORKGROUP, .memory_scope = SCOPE_DEVICE,
+               .memory_semantics = NIR_MEMORY_ACQ_REL, .memory_modes = nir_var_mem_ssbo);
 
    nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, global_id, nir_ssa_undef(&b, 1, 32), data,
                          nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D);
index 686a375..3bfabd6 100644 (file)
@@ -64,8 +64,8 @@ build_dcc_decompress_compute_shader(struct radv_device *dev)
     * creating a vmcnt(0) because it expects the L1 cache to keep memory
     * operations in-order for the same workgroup. The vmcnt(0) seems
     * necessary however. */
-   nir_scoped_barrier(&b, .execution_scope = SCOPE_WORKGROUP, .memory_scope = SCOPE_DEVICE,
-                      .memory_semantics = NIR_MEMORY_ACQ_REL, .memory_modes = nir_var_mem_ssbo);
+   nir_barrier(&b, .execution_scope = SCOPE_WORKGROUP, .memory_scope = SCOPE_DEVICE,
+               .memory_semantics = NIR_MEMORY_ACQ_REL, .memory_modes = nir_var_mem_ssbo);
 
    nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, img_coord, nir_ssa_undef(&b, 1, 32), data,
                          nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D);
index 6a166f2..469e997 100644 (file)
@@ -1073,7 +1073,7 @@ agx_emit_intrinsic(agx_builder *b, nir_intrinsic_instr *instr)
       return agx_load_compute_dimension(
          b, dst, instr, AGX_SR_THREAD_POSITION_IN_THREADGROUP_X);
 
-   case nir_intrinsic_scoped_barrier: {
+   case nir_intrinsic_barrier: {
       assert(!b->shader->is_preamble && "invalid");
 
       bool needs_image_barriers = false;
index 1bc1150..4472af7 100644 (file)
@@ -3526,7 +3526,7 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
                 break;
         }
 
-        case nir_intrinsic_scoped_barrier:
+        case nir_intrinsic_barrier:
                 /* Ensure that the TMU operations before the barrier are flushed
                  * before the ones after the barrier.
                  */
index 93a183f..9e8076a 100644 (file)
@@ -1131,7 +1131,7 @@ nir_visitor::visit(ir_call *ir)
       case ir_intrinsic_memory_barrier_shared:
       case ir_intrinsic_memory_barrier_atomic_counter:
       case ir_intrinsic_group_memory_barrier:
-         op = nir_intrinsic_scoped_barrier;
+         op = nir_intrinsic_barrier;
          break;
       case ir_intrinsic_image_size:
          op = nir_intrinsic_image_deref_size;
@@ -1387,8 +1387,8 @@ nir_visitor::visit(ir_call *ir)
          nir_builder_instr_insert(&b, &instr->instr);
          break;
       }
-      case nir_intrinsic_scoped_barrier: {
-         /* The nir_intrinsic_scoped_barrier follows the general
+      case nir_intrinsic_barrier: {
+         /* The nir_intrinsic_barrier follows the general
           * semantics of SPIR-V memory barriers, so this and other memory
           * barriers use the mapping based on GLSL->SPIR-V from
           *
@@ -2657,11 +2657,11 @@ void
 nir_visitor::visit(ir_barrier *)
 {
    if (shader->info.stage == MESA_SHADER_COMPUTE) {
-      nir_scoped_barrier(&b, SCOPE_WORKGROUP, SCOPE_WORKGROUP,
-                         NIR_MEMORY_ACQ_REL, nir_var_mem_shared);
+      nir_barrier(&b, SCOPE_WORKGROUP, SCOPE_WORKGROUP,
+                      NIR_MEMORY_ACQ_REL, nir_var_mem_shared);
    } else if (shader->info.stage == MESA_SHADER_TESS_CTRL) {
-      nir_scoped_barrier(&b, SCOPE_WORKGROUP, SCOPE_WORKGROUP,
-                         NIR_MEMORY_ACQ_REL, nir_var_shader_out);
+      nir_barrier(&b, SCOPE_WORKGROUP, SCOPE_WORKGROUP,
+                      NIR_MEMORY_ACQ_REL, nir_var_shader_out);
    }
 }
 
index 465de5f..4a08b34 100644 (file)
@@ -1921,7 +1921,7 @@ nir_scoped_memory_barrier(nir_builder *b,
                           nir_memory_semantics semantics,
                           nir_variable_mode modes)
 {
-   nir_scoped_barrier(b, SCOPE_NONE, scope, semantics, modes);
+   nir_barrier(b, SCOPE_NONE, scope, semantics, modes);
 }
 
 nir_ssa_def *
index e2377a7..1519693 100644 (file)
@@ -758,7 +758,7 @@ gather_intrinsic_info(nir_intrinsic_instr *instr, nir_shader *shader,
 
       break;
 
-   case nir_intrinsic_scoped_barrier:
+   case nir_intrinsic_barrier:
       shader->info.uses_control_barrier |=
          nir_intrinsic_execution_scope(instr) != SCOPE_NONE;
 
index 955a87c..980cb61 100644 (file)
@@ -415,7 +415,7 @@ barrier("terminate")
 # OpMemoryBarrier and OpControlBarrier, used to implement Vulkan Memory Model.
 # Storage that the barrier applies is represented using NIR variable modes.
 # For an OpMemoryBarrier, set EXECUTION_SCOPE to SCOPE_NONE.
-intrinsic("scoped_barrier",
+intrinsic("barrier",
           indices=[EXECUTION_SCOPE, MEMORY_SCOPE, MEMORY_SEMANTICS, MEMORY_MODES])
 
 # Shader clock intrinsic with semantics analogous to the clock2x32ARB()
index 15952e6..81fbcf4 100644 (file)
@@ -105,7 +105,7 @@ visit_instr(nir_instr *instr, uint32_t *cur_modes, unsigned vis_avail_sem)
       return false;
    nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
 
-   if (intrin->intrinsic == nir_intrinsic_scoped_barrier &&
+   if (intrin->intrinsic == nir_intrinsic_barrier &&
        (nir_intrinsic_memory_semantics(intrin) & vis_avail_sem)) {
       *cur_modes |= nir_intrinsic_memory_modes(intrin);
 
index 7c19451..565665d 100644 (file)
@@ -89,7 +89,7 @@ append_launch_mesh_workgroups_to_nv_task(nir_builder *b,
    nir_ssa_def *zero = nir_imm_int(b, 0);
    nir_store_shared(b, zero, zero, .base = s->task_count_shared_addr);
 
-   nir_scoped_barrier(b,
+   nir_barrier(b,
          .execution_scope = SCOPE_WORKGROUP,
          .memory_scope = SCOPE_WORKGROUP,
          .memory_semantics = NIR_MEMORY_RELEASE,
@@ -100,7 +100,7 @@ append_launch_mesh_workgroups_to_nv_task(nir_builder *b,
     */
    b->cursor = nir_after_cf_list(&b->impl->body);
 
-   nir_scoped_barrier(b,
+   nir_barrier(b,
          .execution_scope = SCOPE_WORKGROUP,
          .memory_scope = SCOPE_WORKGROUP,
          .memory_semantics = NIR_MEMORY_ACQUIRE,
@@ -232,10 +232,10 @@ emit_shared_to_payload_copy(nir_builder *b,
    /* Wait for all previous shared stores to finish.
     * This is necessary because we placed the payload in shared memory.
     */
-   nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
-                         .memory_scope = SCOPE_WORKGROUP,
-                         .memory_semantics = NIR_MEMORY_ACQ_REL,
-                         .memory_modes = nir_var_mem_shared);
+   nir_barrier(b, .execution_scope = SCOPE_WORKGROUP,
+                  .memory_scope = SCOPE_WORKGROUP,
+                  .memory_semantics = NIR_MEMORY_ACQ_REL,
+                  .memory_modes = nir_var_mem_shared);
 
    /* Payload_size is a size of user-accessible payload, but on some
     * hardware (e.g. Intel) payload has a private header, which we have
index bd128ee..40ccf7a 100644 (file)
@@ -185,8 +185,8 @@ nir_zero_initialize_shared_memory(nir_shader *shader,
    }
    nir_pop_loop(&b, loop);
 
-   nir_scoped_barrier(&b, SCOPE_WORKGROUP, SCOPE_WORKGROUP,
-                      NIR_MEMORY_ACQ_REL, nir_var_mem_shared);
+   nir_barrier(&b, SCOPE_WORKGROUP, SCOPE_WORKGROUP, NIR_MEMORY_ACQ_REL,
+               nir_var_mem_shared);
 
    nir_metadata_preserve(nir_shader_get_entrypoint(shader), nir_metadata_none);
 
index 3983b32..a4d1473 100644 (file)
@@ -53,7 +53,7 @@ nir_opt_combine_barriers_impl(
          }
 
          nir_intrinsic_instr *current = nir_instr_as_intrinsic(instr);
-         if (current->intrinsic != nir_intrinsic_scoped_barrier) {
+         if (current->intrinsic != nir_intrinsic_barrier) {
             prev = NULL;
             continue;
          }
index bf0c249..654e1e2 100644 (file)
@@ -314,7 +314,7 @@ combine_stores_block(struct combine_stores_state *state, nir_block *block)
          }
          break;
 
-      case nir_intrinsic_scoped_barrier:
+      case nir_intrinsic_barrier:
          if (nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_RELEASE) {
             combine_stores_with_modes(state,
                                       nir_intrinsic_memory_modes(intrin));
index 1dc81e6..a9c4525 100644 (file)
@@ -186,7 +186,7 @@ gather_vars_written(struct copy_prop_var_state *state,
 
          nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
          switch (intrin->intrinsic) {
-         case nir_intrinsic_scoped_barrier:
+         case nir_intrinsic_barrier:
             if (nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_ACQUIRE)
                written->modes |= nir_intrinsic_memory_modes(intrin);
             break;
@@ -1043,7 +1043,7 @@ copy_prop_vars_block(struct copy_prop_var_state *state,
 
       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
       switch (intrin->intrinsic) {
-      case nir_intrinsic_scoped_barrier:
+      case nir_intrinsic_barrier:
          if (debug) dump_instr(instr);
 
          if (nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_ACQUIRE)
index 4a4d403..71344cd 100644 (file)
@@ -132,7 +132,7 @@ remove_dead_write_vars_local(void *mem_ctx, nir_shader *shader, nir_block *block
 
       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
       switch (intrin->intrinsic) {
-      case nir_intrinsic_scoped_barrier: {
+      case nir_intrinsic_barrier: {
          if (nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_RELEASE) {
             clear_unused_for_modes(&unused_writes,
                                    nir_intrinsic_memory_modes(intrin));
index 90abbcc..271783a 100644 (file)
@@ -1318,7 +1318,7 @@ handle_barrier(struct vectorize_ctx *ctx, bool *progress, nir_function_impl *imp
          acquire = false;
          modes = nir_var_all;
          break;
-      case nir_intrinsic_scoped_barrier:
+      case nir_intrinsic_barrier:
          if (nir_intrinsic_memory_scope(intrin) == SCOPE_NONE)
             break;
 
index ee87cb0..953b03a 100644 (file)
@@ -407,7 +407,7 @@ nir_schedule_intrinsic_deps(nir_deps_state *state,
       add_write_dep(state, &state->store_shared, n);
       break;
 
-   case nir_intrinsic_scoped_barrier: {
+   case nir_intrinsic_barrier: {
       const nir_variable_mode modes = nir_intrinsic_memory_modes(instr);
 
       if (modes & nir_var_mem_shared)
index 3ef46b3..f3d7887 100644 (file)
@@ -819,7 +819,7 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_memory_barrier)
 TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_barrier)
 {
    create_load(nir_var_mem_ssbo, 0, 0, 0x1);
-   nir_scoped_barrier(b, SCOPE_WORKGROUP, SCOPE_NONE,
+   nir_barrier(b, SCOPE_WORKGROUP, SCOPE_NONE,
                       (nir_memory_semantics)0, (nir_variable_mode)0);
    create_load(nir_var_mem_ssbo, 0, 4, 0x2);
 
index 7c0beab..cd454f9 100644 (file)
@@ -2631,8 +2631,8 @@ vtn_emit_scoped_control_barrier(struct vtn_builder *b, SpvScope exec_scope,
    else
       nir_mem_scope = vtn_translate_scope(b, mem_scope);
 
-   nir_scoped_barrier(&b->nb, .execution_scope=nir_exec_scope, .memory_scope=nir_mem_scope,
-                              .memory_semantics=nir_semantics, .memory_modes=modes);
+   nir_barrier(&b->nb, .execution_scope=nir_exec_scope, .memory_scope=nir_mem_scope,
+                       .memory_semantics=nir_semantics, .memory_modes=modes);
 }
 
 void
@@ -2647,9 +2647,9 @@ vtn_emit_memory_barrier(struct vtn_builder *b, SpvScope scope,
    if (nir_semantics == 0 || modes == 0)
       return;
 
-   nir_scoped_barrier(&b->nb, .memory_scope=vtn_translate_scope(b, scope),
-                              .memory_semantics=nir_semantics,
-                              .memory_modes=modes);
+   nir_barrier(&b->nb, .memory_scope=vtn_translate_scope(b, scope),
+                       .memory_semantics=nir_semantics,
+                       .memory_modes=modes);
 }
 
 struct vtn_ssa_value *
index 35430db..6e59730 100644 (file)
@@ -79,7 +79,7 @@ TEST_F(AvailabilityVisibility, opload_vis)
 
    get_nir(sizeof(words) / sizeof(words[0]), words);
 
-   nir_intrinsic_instr *intrinsic = find_intrinsic(nir_intrinsic_scoped_barrier, 0);
+   nir_intrinsic_instr *intrinsic = find_intrinsic(nir_intrinsic_barrier, 0);
    ASSERT_NE(intrinsic, nullptr);
 
    EXPECT_EQ(nir_intrinsic_memory_semantics(intrinsic), NIR_MEMORY_MAKE_VISIBLE | NIR_MEMORY_ACQUIRE);
@@ -143,7 +143,7 @@ TEST_F(AvailabilityVisibility, opstore_avail)
 
    get_nir(sizeof(words) / sizeof(words[0]), words);
 
-   nir_intrinsic_instr *intrinsic = find_intrinsic(nir_intrinsic_scoped_barrier, 0);
+   nir_intrinsic_instr *intrinsic = find_intrinsic(nir_intrinsic_barrier, 0);
    ASSERT_NE(intrinsic, nullptr);
 
    EXPECT_EQ(nir_intrinsic_memory_semantics(intrinsic), NIR_MEMORY_MAKE_AVAILABLE | NIR_MEMORY_RELEASE);
@@ -207,8 +207,8 @@ TEST_F(AvailabilityVisibility, opcopymemory_visavail_both_combined)
 
    get_nir(sizeof(words) / sizeof(words[0]), words);
 
-   nir_intrinsic_instr *first = find_intrinsic(nir_intrinsic_scoped_barrier, 0);
-   nir_intrinsic_instr *second = find_intrinsic(nir_intrinsic_scoped_barrier, 1);
+   nir_intrinsic_instr *first = find_intrinsic(nir_intrinsic_barrier, 0);
+   nir_intrinsic_instr *second = find_intrinsic(nir_intrinsic_barrier, 1);
    ASSERT_NE(first, nullptr);
    ASSERT_NE(second, nullptr);
 
@@ -279,8 +279,8 @@ TEST_F(AvailabilityVisibility, opcopymemory_visavail_both_separate)
 
    get_nir(sizeof(words) / sizeof(words[0]), words);
 
-   nir_intrinsic_instr *first = find_intrinsic(nir_intrinsic_scoped_barrier, 0);
-   nir_intrinsic_instr *second = find_intrinsic(nir_intrinsic_scoped_barrier, 1);
+   nir_intrinsic_instr *first = find_intrinsic(nir_intrinsic_barrier, 0);
+   nir_intrinsic_instr *second = find_intrinsic(nir_intrinsic_barrier, 1);
    ASSERT_NE(first, nullptr);
    ASSERT_NE(second, nullptr);
 
@@ -349,7 +349,7 @@ TEST_F(AvailabilityVisibility, opcopymemory_avail)
 
    get_nir(sizeof(words) / sizeof(words[0]), words);
 
-   nir_intrinsic_instr *intrinsic = find_intrinsic(nir_intrinsic_scoped_barrier, 0);
+   nir_intrinsic_instr *intrinsic = find_intrinsic(nir_intrinsic_barrier, 0);
    ASSERT_NE(intrinsic, nullptr);
 
    EXPECT_EQ(nir_intrinsic_memory_semantics(intrinsic), NIR_MEMORY_MAKE_AVAILABLE | NIR_MEMORY_RELEASE);
@@ -412,7 +412,7 @@ TEST_F(AvailabilityVisibility, opcopymemory_vis)
 
    get_nir(sizeof(words) / sizeof(words[0]), words);
 
-   nir_intrinsic_instr *intrinsic = find_intrinsic(nir_intrinsic_scoped_barrier, 0);
+   nir_intrinsic_instr *intrinsic = find_intrinsic(nir_intrinsic_barrier, 0);
    ASSERT_NE(intrinsic, nullptr);
 
    EXPECT_EQ(nir_intrinsic_memory_semantics(intrinsic), NIR_MEMORY_MAKE_VISIBLE | NIR_MEMORY_ACQUIRE);
index a9ce473..3e9b673 100644 (file)
@@ -598,12 +598,12 @@ handle_core(struct vtn_builder *b, uint32_t opcode,
        * The libclc we have uses a __local pointer but clang gives us generic
        * pointers.  Fortunately, the whole function is just a barrier.
        */
-      nir_scoped_barrier(&b->nb, .execution_scope = SCOPE_WORKGROUP,
-                                 .memory_scope = SCOPE_WORKGROUP,
-                                 .memory_semantics = NIR_MEMORY_ACQUIRE |
-                                                     NIR_MEMORY_RELEASE,
-                                 .memory_modes = nir_var_mem_shared |
-                                                 nir_var_mem_global);
+      nir_barrier(&b->nb, .execution_scope = SCOPE_WORKGROUP,
+                          .memory_scope = SCOPE_WORKGROUP,
+                          .memory_semantics = NIR_MEMORY_ACQUIRE |
+                                              NIR_MEMORY_RELEASE,
+                          .memory_modes = nir_var_mem_shared |
+                                          nir_var_mem_global);
       break;
    }
    default:
index 5d0ee2b..5e067db 100644 (file)
@@ -2243,7 +2243,7 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
    case nir_intrinsic_bindless_image_atomic_swap:
       dst[0] = ctx->funcs->emit_intrinsic_atomic_image(ctx, intr);
       break;
-   case nir_intrinsic_scoped_barrier:
+   case nir_intrinsic_barrier:
       emit_intrinsic_barrier(ctx, intr);
       /* note that blk ptr no longer valid, make that obvious: */
       b = NULL;
index 80bed87..30a5972 100644 (file)
@@ -2249,7 +2249,7 @@ visit_intrinsic(struct lp_build_nir_context *bld_base,
    case nir_intrinsic_shared_atomic_swap:
       visit_shared_atomic(bld_base, instr, result);
       break;
-   case nir_intrinsic_scoped_barrier:
+   case nir_intrinsic_barrier:
       visit_barrier(bld_base, instr);
       break;
    case nir_intrinsic_load_kernel_input:
index 913e263..68ccb57 100644 (file)
@@ -2644,7 +2644,7 @@ ntt_emit_intrinsic(struct ntt_compile *c, nir_intrinsic_instr *instr)
       ntt_emit_image_load_store(c, instr);
       break;
 
-   case nir_intrinsic_scoped_barrier:
+   case nir_intrinsic_barrier:
       ntt_emit_barrier(c, instr);
       break;
 
index 0e426e4..384073f 100644 (file)
@@ -959,7 +959,7 @@ ttn_lit(nir_builder *b, nir_ssa_def **src)
 static void
 ttn_barrier(nir_builder *b)
 {
-   nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP);
+   nir_barrier(b, .execution_scope = SCOPE_WORKGROUP);
 }
 
 static void
index 0eed9be..bb9e9a8 100644 (file)
@@ -669,7 +669,7 @@ Shader::scan_instruction(nir_instr *instr)
       m_flags.set(sh_writes_memory);
       m_flags.set(sh_uses_images);
       break;
-   case nir_intrinsic_scoped_barrier:
+   case nir_intrinsic_barrier:
       m_chain_instr.prepare_mem_barrier |=
             (nir_intrinsic_memory_modes(intr) &
              (nir_var_mem_ssbo | nir_var_mem_global | nir_var_image) &&
@@ -907,8 +907,8 @@ Shader::process_intrinsic(nir_intrinsic_instr *intr)
       return emit_load_tcs_param_base(intr, 0);
    case nir_intrinsic_load_tcs_out_param_base_r600:
       return emit_load_tcs_param_base(intr, 16);
-   case nir_intrinsic_scoped_barrier:
-      return emit_scoped_barrier(intr);
+   case nir_intrinsic_barrier:
+      return emit_barrier(intr);
    case nir_intrinsic_shared_atomic:
    case nir_intrinsic_shared_atomic_swap:
       return emit_atomic_local_shared(intr);
@@ -1497,7 +1497,7 @@ Shader::emit_group_barrier(nir_intrinsic_instr *intr)
    return true;
 }
 
-bool Shader::emit_scoped_barrier(nir_intrinsic_instr *intr)
+bool Shader::emit_barrier(nir_intrinsic_instr *intr)
 {
 
    if ((nir_intrinsic_execution_scope(intr) == SCOPE_WORKGROUP)) {
index 559d9d7..5591313 100644 (file)
@@ -320,7 +320,7 @@ private:
    bool emit_group_barrier(nir_intrinsic_instr *intr);
    bool emit_shader_clock(nir_intrinsic_instr *instr);
    bool emit_wait_ack();
-   bool emit_scoped_barrier(nir_intrinsic_instr *instr);
+   bool emit_barrier(nir_intrinsic_instr *instr);
    bool emit_load_reg(nir_intrinsic_instr *intr);
    bool emit_load_reg_indirect(nir_intrinsic_instr *intr);
    bool emit_store_reg(nir_intrinsic_instr *intr);
index cebb57d..e1e92cc 100644 (file)
@@ -71,7 +71,7 @@ static void scan_tess_ctrl(nir_cf_node *cf_node, unsigned *upper_block_tf_writem
             continue;
 
          nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
-         if (intrin->intrinsic == nir_intrinsic_scoped_barrier &&
+         if (intrin->intrinsic == nir_intrinsic_barrier &&
              nir_intrinsic_execution_scope(intrin) >= SCOPE_WORKGROUP) {
 
             /* If we find a barrier in nested control flow put this in the
index fe03b85..4a53315 100644 (file)
@@ -3661,7 +3661,7 @@ emit_intrinsic(struct ntv_context *ctx, nir_intrinsic_instr *intr)
                           SpvBuiltInTessCoord, nir_type_float);
       break;
 
-   case nir_intrinsic_scoped_barrier:
+   case nir_intrinsic_barrier:
       emit_barrier(ctx, intr);
       break;
 
index 63df978..bd9857e 100644 (file)
@@ -125,12 +125,12 @@ shared_var_info(const struct glsl_type *type, unsigned *size, unsigned *align)
 }
 
 static bool
-remove_scoped_barriers_impl(nir_builder *b, nir_instr *instr, void *data)
+remove_barriers_impl(nir_builder *b, nir_instr *instr, void *data)
 {
    if (instr->type != nir_instr_type_intrinsic)
       return false;
    nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
-   if (intr->intrinsic != nir_intrinsic_scoped_barrier)
+   if (intr->intrinsic != nir_intrinsic_barrier)
       return false;
    if (data) {
       if (nir_intrinsic_memory_scope(intr) == SCOPE_WORKGROUP ||
@@ -143,9 +143,9 @@ remove_scoped_barriers_impl(nir_builder *b, nir_instr *instr, void *data)
 }
 
 static bool
-remove_scoped_barriers(nir_shader *nir, bool is_compute)
+remove_barriers(nir_shader *nir, bool is_compute)
 {
-   return nir_shader_instructions_pass(nir, remove_scoped_barriers_impl, nir_metadata_dominance, (void*)is_compute);
+   return nir_shader_instructions_pass(nir, remove_barriers_impl, nir_metadata_dominance, (void*)is_compute);
 }
 
 static bool
@@ -371,7 +371,7 @@ static void
 lvp_shader_lower(struct lvp_device *pdevice, nir_shader *nir, struct lvp_shader *shader, struct lvp_pipeline_layout *layout)
 {
    if (nir->info.stage != MESA_SHADER_TESS_CTRL)
-      NIR_PASS_V(nir, remove_scoped_barriers, nir->info.stage == MESA_SHADER_COMPUTE || nir->info.stage == MESA_SHADER_MESH || nir->info.stage == MESA_SHADER_TASK);
+      NIR_PASS_V(nir, remove_barriers, nir->info.stage == MESA_SHADER_COMPUTE || nir->info.stage == MESA_SHADER_MESH || nir->info.stage == MESA_SHADER_TASK);
 
    const struct nir_lower_sysvals_to_varyings_options sysvals_to_varyings = {
       .frag_coord = true,
index 58a1c71..b7505d8 100644 (file)
@@ -2779,7 +2779,7 @@ fs_visitor::nir_emit_tcs_intrinsic(const fs_builder &bld,
       bld.MOV(retype(dst, invocation_id.type), invocation_id);
       break;
 
-   case nir_intrinsic_scoped_barrier:
+   case nir_intrinsic_barrier:
       if (nir_intrinsic_memory_scope(instr) != SCOPE_NONE)
          nir_emit_intrinsic(bld, instr);
       if (nir_intrinsic_execution_scope(instr) == SCOPE_WORKGROUP) {
@@ -3708,7 +3708,7 @@ fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld,
       dest = get_nir_dest(instr->dest);
 
    switch (instr->intrinsic) {
-   case nir_intrinsic_scoped_barrier:
+   case nir_intrinsic_barrier:
       if (nir_intrinsic_memory_scope(instr) != SCOPE_NONE)
          nir_emit_intrinsic(bld, instr);
       if (nir_intrinsic_execution_scope(instr) == SCOPE_WORKGROUP) {
@@ -4549,7 +4549,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
       break;
    }
 
-   case nir_intrinsic_scoped_barrier:
+   case nir_intrinsic_barrier:
    case nir_intrinsic_begin_invocation_interlock:
    case nir_intrinsic_end_invocation_interlock: {
       bool ugm_fence, slm_fence, tgm_fence, urb_fence;
@@ -4560,7 +4560,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
        */
 
       switch (instr->intrinsic) {
-      case nir_intrinsic_scoped_barrier: {
+      case nir_intrinsic_barrier: {
          /* Note we only care about the memory part of the
           * barrier.  The execution part will be taken care
           * of by the stage specific intrinsic handler functions.
index 5d0c67a..3f8693b 100644 (file)
@@ -1168,7 +1168,7 @@ brw_nir_initialize_mue(nir_shader *nir,
     * may start filling MUE before other finished initializing.
     */
    if (workgroup_size > dispatch_width) {
-      nir_scoped_barrier(&b, SCOPE_WORKGROUP, SCOPE_WORKGROUP,
+      nir_barrier(&b, SCOPE_WORKGROUP, SCOPE_WORKGROUP,
                          NIR_MEMORY_ACQ_REL, nir_var_shader_out);
    }
 
index 40aab80..b1f6aa5 100644 (file)
@@ -728,7 +728,7 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
       break;
    }
 
-   case nir_intrinsic_scoped_barrier: {
+   case nir_intrinsic_barrier: {
       if (nir_intrinsic_memory_scope(instr) == SCOPE_NONE)
          break;
       const vec4_builder bld =
index 9f7b2e5..3bbbcac 100644 (file)
@@ -304,7 +304,7 @@ vec4_tcs_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
       break;
    }
 
-   case nir_intrinsic_scoped_barrier:
+   case nir_intrinsic_barrier:
       if (nir_intrinsic_memory_scope(instr) != SCOPE_NONE)
          vec4_visitor::nir_emit_intrinsic(instr);
       if (nir_intrinsic_execution_scope(instr) == SCOPE_WORKGROUP) {
index 66e80d4..c40b23c 100644 (file)
@@ -156,7 +156,7 @@ anv_mesh_convert_attrs_prim_to_vert(struct nir_shader *nir,
    nir_builder b = nir_builder_at(nir_after_cf_list(&impl->body));
 
    /* wait for all subgroups to finish */
-   nir_scoped_barrier(&b, SCOPE_WORKGROUP);
+   nir_barrier(&b, SCOPE_WORKGROUP);
 
    nir_ssa_def *zero = nir_imm_int(&b, 0);
 
index 75283fd..d9e64f1 100644 (file)
@@ -1946,7 +1946,7 @@ lower_subgroup_id(nir_builder *b, nir_instr *instr, void *data)
       nir_store_deref(b, counter_deref, nir_imm_int(b, 0), 1);
       nir_pop_if(b, nif);
 
-      nir_scoped_barrier(b,
+      nir_barrier(b,
                          .execution_scope = SCOPE_WORKGROUP,
                          .memory_scope = SCOPE_WORKGROUP,
                          .memory_semantics = NIR_MEMORY_ACQ_REL,
index 29a3654..2a7e39b 100644 (file)
@@ -30,7 +30,7 @@
 static bool
 is_memory_barrier_tcs_patch(const nir_intrinsic_instr *intr)
 {
-   if (intr->intrinsic == nir_intrinsic_scoped_barrier &&
+   if (intr->intrinsic == nir_intrinsic_barrier &&
        nir_intrinsic_memory_modes(intr) & nir_var_shader_out) {
       assert(nir_intrinsic_memory_modes(intr) == nir_var_shader_out);
       assert(nir_intrinsic_memory_scope(intr) == SCOPE_WORKGROUP);
@@ -259,7 +259,7 @@ dxil_nir_split_tess_ctrl(nir_shader *nir, nir_function **patch_const_func)
             nir_ssa_def_rewrite_uses(&intr->dest.ssa, state.count);
             break;
          }
-         case nir_intrinsic_scoped_barrier:
+         case nir_intrinsic_barrier:
             if (!is_memory_barrier_tcs_patch(intr))
                break;
 
index edcce78..68021c9 100644 (file)
@@ -4850,7 +4850,7 @@ emit_intrinsic(struct ntd_context *ctx, nir_intrinsic_instr *intr)
       return emit_emit_vertex(ctx, intr);
    case nir_intrinsic_end_primitive:
       return emit_end_primitive(ctx, intr);
-   case nir_intrinsic_scoped_barrier:
+   case nir_intrinsic_barrier:
       return emit_barrier(ctx, intr);
    case nir_intrinsic_ssbo_atomic:
       return emit_ssbo_atomic(ctx, intr);
index b204189..1810652 100644 (file)
@@ -2336,7 +2336,7 @@ Converter::visit(nir_intrinsic_instr *insn)
 
       break;
    }
-   case nir_intrinsic_scoped_barrier: {
+   case nir_intrinsic_barrier: {
       mesa_scope exec_scope = nir_intrinsic_execution_scope(insn);
       mesa_scope mem_scope = nir_intrinsic_memory_scope(insn);
       nir_variable_mode modes = nir_intrinsic_memory_modes(insn);
index 93a4eda..a442dd0 100644 (file)
@@ -1560,7 +1560,7 @@ bi_emit_intrinsic(bi_builder *b, nir_intrinsic_instr *instr)
       bi_emit_store(b, instr, BI_SEG_WLS);
       break;
 
-   case nir_intrinsic_scoped_barrier:
+   case nir_intrinsic_barrier:
       if (nir_intrinsic_execution_scope(instr) != SCOPE_NONE) {
          assert(b->shader->stage != MESA_SHADER_FRAGMENT);
          assert(nir_intrinsic_execution_scope(instr) > SCOPE_SUBGROUP &&
index 9473b60..79f9ea8 100644 (file)
@@ -1908,7 +1908,7 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
       emit_special(ctx, instr, 97);
       break;
 
-   case nir_intrinsic_scoped_barrier:
+   case nir_intrinsic_barrier:
       if (nir_intrinsic_execution_scope(instr) != SCOPE_NONE) {
          schedule_barrier(ctx);
          emit_control_barrier(ctx);